author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/seastar/dpdk/drivers/net/sfc
parent     Initial commit. (diff)
Adding upstream version 14.2.21. (upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/seastar/dpdk/drivers/net/sfc')
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/Makefile | 143
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/README | 36
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_ev.c | 1401
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_filter.c | 1501
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_impl.h | 1183
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_intr.c | 197
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_mac.c | 897
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_mcdi.c | 342
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_nic.c | 1780
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_nvram.c | 2385
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_phy.c | 631
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_rx.c | 965
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h | 941
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_tx.c | 710
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/ef10_vpd.c | 463
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx.h | 2535
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_bootcfg.c | 563
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_check.h | 346
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_crc32.c | 122
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_ev.c | 1470
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_filter.c | 1424
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_hash.c | 328
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_impl.h | 1208
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_intr.c | 572
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_lic.c | 1751
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_mac.c | 951
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.c | 2346
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.h | 415
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_mon.c | 255
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_nic.c | 1110
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_nvram.c | 1044
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_phy.c | 561
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_phy_ids.h | 51
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_port.c | 252
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_regs.h | 3870
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_regs_ef10.h | 571
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h | 15690
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_regs_pci.h | 2356
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_rx.c | 1315
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_sram.c | 331
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_tx.c | 1097
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_types.h | 1647
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/efx_vpd.c | 1016
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/hunt_impl.h | 74
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/hunt_nic.c | 402
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.c | 565
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.h | 74
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/medford_impl.h | 67
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/medford_nic.c | 402
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_flash.h | 215
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_impl.h | 431
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_mac.c | 476
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_mcdi.c | 263
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_nic.c | 585
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_nvram.c | 734
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_phy.c | 797
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_sram.c | 178
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/base/siena_vpd.c | 618
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/efsys.h | 780
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/rte_pmd_sfc_efx_version.map | 4
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc.c | 750
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc.h | 322
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_debug.h | 59
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_dp.c | 100
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_dp.h | 125
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_dp_rx.h | 197
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_dp_tx.h | 170
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_ef10.h | 107
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_ef10_rx.c | 712
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_ef10_tx.c | 560
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_ethdev.c | 1642
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_ev.c | 921
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_ev.h | 129
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_filter.c | 137
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_filter.h | 62
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_flow.c | 1175
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_flow.h | 64
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_intr.c | 342
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.c | 145
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.h | 93
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_log.h | 76
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_mcdi.c | 331
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_port.c | 475
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_rx.c | 1327
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_rx.h | 180
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_tso.c | 201
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_tweak.h | 56
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_tx.c | 992
-rw-r--r--  src/seastar/dpdk/drivers/net/sfc/sfc_tx.h | 164
89 files changed, 77051 insertions, 0 deletions
diff --git a/src/seastar/dpdk/drivers/net/sfc/Makefile b/src/seastar/dpdk/drivers/net/sfc/Makefile
new file mode 100644
index 00000000..57aa963b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/Makefile
@@ -0,0 +1,143 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016-2017 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_sfc_efx.a
+
+CFLAGS += -I$(SRCDIR)/base/
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts
+CFLAGS += -Wno-strict-aliasing
+
+# Enable extra warnings
+CFLAGS += -Wextra
+
+# More warnings not enabled by above aggregators
+CFLAGS += -Wdisabled-optimization
+
+# Extra CFLAGS for base driver files
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+
+# Compiler and version dependent flags
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wnested-externs
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wbad-function-cast
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+endif
+
+#
+# List of base driver object files for which
+# special CFLAGS above should be applied
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), \
+ $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+EXPORT_MAP := rte_pmd_sfc_efx_version.map
+
+LIBABIVER := 1
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_kvargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
+
+VPATH += $(SRCDIR)/base
+
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_bootcfg.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_crc32.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_lic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += mcdi_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/README b/src/seastar/dpdk/drivers/net/sfc/base/README
new file mode 100644
index 00000000..9019e8ba
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/README
@@ -0,0 +1,36 @@
+
+ Copyright (c) 2006-2016 Solarflare Communications Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Solarflare libefx driver library
+================================
+
+This directory contains the source code of the Solarflare Communications
+libefx driver library, version v4.10.0.1012.
+
+Updating
+========
+
+The source code in this directory should not be modified.
+Please contact the driver maintainers to request changes.
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_ev.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_ev.c
new file mode 100644
index 00000000..35226749
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_ev.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_STATS
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
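
The do { ... } while (B_FALSE) wrapper in EFX_EV_QSTAT_INCR above is the usual C idiom for making a multi-statement macro expand to a single statement, so it stays safe under an unbraced if/else. A minimal self-contained illustration of the idiom follows (the macro and variable names are illustrative, not part of libefx):

#include <stdio.h>

/*
 * Without the do/while wrapper the two statements would break an
 * unbraced if/else; with it, the macro expands to exactly one statement.
 */
#define INCR_AND_LOG(_counter)					\
	do {							\
		(_counter)++;					\
		printf("count=%d\n", (_counter));		\
	} while (0)

int
main(void)
{
	int hits = 0;

	if (hits == 0)
		INCR_AND_LOG(hits);	/* expands safely as one statement */
	else
		printf("already counted\n");

	return (0);
}
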
+
+/*
+ * A non-interrupting event queue requires an interrupting event queue to
+ * refer to for wake-up events, even if wake-ups are never used.
+ * It may even be a non-allocated event queue.
+ */
+#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_set_evq_tmr(
+ __in efx_nic_t *enp,
+ __in uint32_t instance,
+ __in uint32_t mode,
+ __in uint32_t timer_ns)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
+ MC_CMD_SET_EVQ_TMR_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_EVQ_TMR;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in boolean_t low_latency)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ boolean_t interrupting;
+ int ev_cut_through;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /*
+	 * On Huntington, RX and TX event batching can only be requested
+	 * together (even if the datapath firmware doesn't actually support
+	 * RX batching). If event cut-through is enabled, no RX batching
+	 * will occur.
+ *
+ * So always enable RX and TX event batching, and enable event cut
+ * through if we want low latency operation.
+ */
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ ev_cut_through = low_latency ? 1 : 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ ev_cut_through = 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ ev_cut_through = 1;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
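
The loop above programs the event queue's DMA buffer addresses into the MCDI request by splitting each 64-bit page address into two 32-bit dwords (low dword first, then high). The same split without the EFX_POPULATE_QWORD_2 macro looks roughly like the following standalone sketch (the page size and example address are illustrative; libefx uses EFX_BUF_SIZE):

#include <stdint.h>
#include <stdio.h>

#define PAGE_BYTES	4096u	/* illustrative; stands in for EFX_BUF_SIZE */
#define NPAGES		2

int
main(void)
{
	uint64_t addr = 0x0000001234567000ULL;	/* example bus address */
	uint32_t dma[NPAGES * 2];
	int i;

	for (i = 0; i < NPAGES; i++) {
		dma[2 * i + 0] = (uint32_t)(addr & 0xffffffff);	/* EFX_DWORD_0 */
		dma[2 * i + 1] = (uint32_t)(addr >> 32);	/* EFX_DWORD_1 */
		addr += PAGE_BYTES;
	}

	for (i = 0; i < NPAGES; i++)
		printf("page %d: hi=0x%08x lo=0x%08x\n",
		    i, dma[2 * i + 1], dma[2 * i + 0]);
	return (0);
}
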
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq_v2(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_V2_OUT_LEN)];
+ boolean_t interrupting;
+ unsigned int evq_type;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
+ MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_evq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t irq;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = ef10_ev_rx;
+ eep->ee_tx = ef10_ev_tx;
+ eep->ee_driver = ef10_ev_driver;
+ eep->ee_drv_gen = ef10_ev_drv_gen;
+ eep->ee_mcdi = ef10_ev_mcdi;
+
+ /* Set up the event queue */
+ /* INIT_EVQ expects function-relative vector number */
+ if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
+ irq = index;
+ } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
+ irq = index;
+ flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ } else {
+ irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
+ }
+
+ /*
+ * Interrupts may be raised for events immediately after the queue is
+ * created. See bug58606.
+ */
+
+ if (encp->enc_init_evq_v2_supported) {
+ /*
+ * On Medford the low latency license is required to enable RX
+ * and event cut through and to disable RX batching. If event
+ * queue type in flags is auto, we let the firmware decide the
+ * settings to use. If the adapter has a low latency license,
+ * it will choose the best settings for low latency, otherwise
+ * it will choose the best settings for throughput.
+ */
+ rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
+ if (rc != 0)
+ goto fail4;
+ } else {
+ /*
+ * On Huntington we need to specify the settings to use.
+ * If event queue type in flags is auto, we favour throughput
+ * if the adapter is running virtualization supporting firmware
+ * (i.e. the full featured firmware variant)
+ * and latency otherwise. The Ethernet Virtual Bridging
+ * capability is used to make this decision. (Note though that
+ * the low latency firmware variant is also best for
+ * throughput and corresponding type should be specified
+ * to choose it.)
+ */
+ boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
+ rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
+ low_latency);
+ if (rc != 0)
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ if (enp->en_nic_cfg.enc_bug35388_workaround) {
+ EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+ } else {
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+ }
+
+ return (0);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_driver_event(
+ __in efx_nic_t *enp,
+ __in uint32_t evq,
+ __in efx_qword_t data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
+ MC_CMD_DRIVER_EVENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_DRIVER_EVENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
+ EFX_QWORD_FIELD(data, EFX_DWORD_0));
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
+ EFX_QWORD_FIELD(data, EFX_DWORD_1));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_3(event,
+ ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
+ ESF_DZ_DRV_SUB_CODE, 0,
+ ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
+
+ (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_dword_t dword;
+ uint32_t mode;
+ efx_rc_t rc;
+
+ /* Check that hardware and MCDI use the same timer MODE values */
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ mode = FFE_CZ_TIMER_MODE_DIS;
+ } else {
+ mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
+ }
+
+ if (encp->enc_bug61265_workaround) {
+ uint32_t ns = us * 1000;
+
+ rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
+ if (rc != 0)
+ goto fail2;
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ if (encp->enc_bug35388_workaround) {
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, ticks);
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+ eep->ee_index, &dword, 0);
+ } else {
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, ticks);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+ eep->ee_index, &dword, 0);
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+static __checkReturn boolean_t
+ef10_ev_rx_packed_stream(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t label;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int pkt_count;
+ unsigned int current_id;
+ boolean_t new_buffer;
+
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
+
+ flags = 0;
+
+ eersp = &eep->ee_rxq_state[label];
+ pkt_count = (EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS) + 1 +
+ next_read_lbits - eersp->eers_rx_stream_npackets) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_stream_npackets += pkt_count;
+
+ if (new_buffer) {
+ flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+ if (eersp->eers_rx_packed_stream_credits <
+ EFX_RX_PACKED_STREAM_MAX_CREDITS)
+ eersp->eers_rx_packed_stream_credits++;
+ eersp->eers_rx_read_ptr++;
+ }
+ current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+ /* RX frame truncated (error flag is misnamed) */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
+ should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
+ flags);
+
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t size;
+ uint32_t label;
+ uint32_t mac_class;
+ uint32_t eth_tag_class;
+ uint32_t l3_class;
+ uint32_t l4_class;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t cont;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int desc_count;
+ unsigned int last_used_id;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ /* Basic packet information */
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ eersp = &eep->ee_rxq_state[label];
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * Packed stream events are very different,
+ * so handle them separately
+ */
+ if (eersp->eers_rx_packed_stream)
+ return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
+#endif
+
+ size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
+ mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
+ l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
+ l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
+ cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+ flags = 0;
+
+ if (cont != 0) {
+ /*
+ * This may be part of a scattered frame, or it may be a
+ * truncated frame if scatter is disabled on this RXQ.
+ * Overlength frames can be received if e.g. a VF is configured
+ * for 1500 MTU but connected to a port set to 9000 MTU
+ * (see bug56567).
+ * FIXME: There is not yet any driver that supports scatter on
+ * Huntington. Scatter support is required for OSX.
+ */
+ flags |= EFX_PKT_CONT;
+ }
+
+ if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
+ flags |= EFX_PKT_UNICAST;
+
+ /* Increment the count of descriptors read */
+ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_read_ptr += desc_count;
+
+ /*
+	 * FIXME: add error checking to make sure this is a batched event.
+ * This could also be an aborted scatter, see Bug36629.
+ */
+ if (desc_count > 1) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
+ flags |= EFX_PKT_PREFIX_LEN;
+ }
+
+ /* Calculate the index of the last descriptor consumed */
+ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+ /* RX frame truncated (error flag is misnamed) */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ /*
+ * Hardware parse failed, due to malformed headers
+ * or headers that are too long for the parser.
+ * Headers and checksums must be validated by the host.
+ */
+ /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
+ goto deliver;
+ }
+
+ if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
+ (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ switch (l3_class) {
+ case ESE_DZ_L3_CLASS_IP4:
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ flags |= EFX_PKT_IPV4;
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_IPV4;
+ }
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ }
+ break;
+
+ case ESE_DZ_L3_CLASS_IP6:
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ flags |= EFX_PKT_IPV6;
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ }
+ break;
+
+ default:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ break;
+ }
+
+ if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_TCPUDP;
+ }
+ }
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
+
+ return (should_abort);
+}
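
The desc_count calculation above (and pkt_count in the packed-stream handler) recovers the number of newly consumed descriptors from a truncated hardware pointer field by subtracting the stored read pointer and masking to the field width, which stays correct across wrap-around. A small standalone sketch of that arithmetic, using an illustrative 4-bit field width:

#include <stdio.h>

/* Illustrative 4-bit field, standing in for ESF_DZ_RX_DSC_PTR_LBITS. */
#define LBITS_MASK	0xfu

/* Descriptors consumed since the last event, valid even after wrap. */
static unsigned int
desc_delta(unsigned int hw_lbits, unsigned int read_ptr)
{
	return ((hw_lbits - read_ptr) & LBITS_MASK);
}

int
main(void)
{
	/* The field wrapped from 14 to 2: (2 - 14) mod 16 == 4. */
	printf("descriptors consumed: %u\n", desc_delta(2, 14));
	return (0);
}
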
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+
+ /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ unsigned int code;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
+ switch (code) {
+ case ESE_DZ_DRV_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_START_UP_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+ break;
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+ should_abort = B_FALSE;
+
+ data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ /*
+ * This event notifies a function that an authorization request
+ * has been processed. If the request was authorized then the
+ * function can now re-send the original MCDI request.
+ * See SF-113652-SW "SR-IOV Proxied Network Access Control".
+ */
+ efx_mcdi_ev_proxy_response(enp,
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
+ break;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ ef10_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ /* Decode monitor stat for MCDI sensor (if supported) */
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
+ /* Report monitor stat change */
+ should_abort = eecp->eec_monitor(arg, id, value);
+ } else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else {
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+ }
+#endif
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+		/* Falcon/Siena only (should not be seen with Huntington). */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MC_REBOOT:
+ /* MC_REBOOT event is used for Huntington (EF10) and later. */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_ERR: {
+ /*
+ * After a TXQ error is detected, firmware sends a TX_ERR event.
+ * This may be followed by TX completions (which we discard),
+ * and then finally by a TX_FLUSH event. Firmware destroys the
+ * TXQ automatically after sending the TX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
+
+ EFSYS_PROBE2(tx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
+ MCDI_EV_FIELD(eqp, TX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_FLUSH: {
+ uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
+
+ /*
+ * EF10 firmware sends two TX_FLUSH events: one to the txq's
+ * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with TX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_ERR: {
+ /*
+ * After an RXQ error is detected, firmware sends an RX_ERR
+ * event. This may be followed by RX events (which we discard),
+ * and then finally by an RX_FLUSH event. Firmware destroys the
+ * RXQ automatically after sending the RX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
+
+ EFSYS_PROBE2(rx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
+ MCDI_EV_FIELD(eqp, RX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_FLUSH: {
+ uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
+
+ /*
+ * EF10 firmware sends two RX_FLUSH events: one to the rxq's
+ * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with RX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ break;
+ }
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in boolean_t packed_stream)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+	 * For packed stream modes, the very first event will have the
+	 * new buffer flag set, so the read pointer will be incremented,
+	 * yielding the correct pointer. That results in simpler code
+	 * than trying to detect the start-of-the-world condition in
+	 * the event handler.
+ */
+ eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
+#else
+ eersp->eers_rx_read_ptr = 0;
+#endif
+ eersp->eers_rx_mask = erp->er_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = packed_stream;
+ if (packed_stream) {
+ eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
+ (EFX_RX_PACKED_STREAM_MEM_PER_CREDIT /
+ EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
+ /*
+ * A single credit is allocated to the queue when it is started.
+		 * It is immediately spent by the first packet, which has the
+		 * NEW BUFFER flag set, but it must still be accounted for here
+		 * so that the credit count does not accidentally wrap past the
+		 * maximum number of credits.
+ */
+ eersp->eers_rx_packed_stream_credits--;
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
+ EFX_RX_PACKED_STREAM_MAX_CREDITS);
+ }
+#else
+ EFSYS_ASSERT(!packed_stream);
+#endif
+}
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
+
+ eersp->eers_rx_read_ptr = 0;
+ eersp->eers_rx_mask = 0;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = B_FALSE;
+ eersp->eers_rx_packed_stream_credits = 0;
+#endif
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_filter.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_filter.c
new file mode 100644
index 00000000..695bb847
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_filter.c
@@ -0,0 +1,1501 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_FILTER
+
+#define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec)
+
+static efx_filter_spec_t *
+ef10_filter_entry_spec(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) &
+ ~(uintptr_t)EFX_EF10_FILTER_FLAGS));
+}
+
+static boolean_t
+ef10_filter_entry_is_busy(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static boolean_t
+ef10_filter_entry_is_auto_old(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static void
+ef10_filter_set_entry(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index,
+ __in_opt const efx_filter_spec_t *efsp)
+{
+ EFE_SPEC(eftp, index) = (uintptr_t)efsp;
+}
+
+static void
+ef10_filter_set_entry_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_not_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+}
+
+static void
+ef10_filter_set_entry_not_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+}
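
The EFE_SPEC helpers above keep the per-entry BUSY and AUTO_OLD flags in the low bits of the stored efx_filter_spec_t pointer, relying on the spec's alignment to keep those bits clear. A minimal standalone sketch of the same low-bit pointer-tagging technique (the struct and flag names below are illustrative, not the libefx ones):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_BUSY	0x1u
#define FLAG_AUTO_OLD	0x2u
#define FLAG_MASK	(FLAG_BUSY | FLAG_AUTO_OLD)

struct spec {
	int dummy;	/* at least 4-byte aligned on common ABIs, so the low 2 bits are free */
};

int
main(void)
{
	static struct spec s;
	uintptr_t entry;

	assert(((uintptr_t)&s & FLAG_MASK) == 0);

	entry = (uintptr_t)&s | FLAG_BUSY;	/* store pointer, tag as busy */

	printf("busy=%d auto_old=%d spec=%p\n",
	    (entry & FLAG_BUSY) != 0,
	    (entry & FLAG_AUTO_OLD) != 0,
	    (void *)(entry & ~(uintptr_t)FLAG_MASK));	/* strip flags to recover the pointer */
	return (0);
}
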
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST));
+ EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST));
+#undef MATCH_MASK
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);
+
+ if (!eftp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_ef10_filter_table = eftp;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_filter.ef_ef10_filter_table != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
+ enp->en_filter.ef_ef10_filter_table);
+ }
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_add(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REPLACE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,
+ handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,
+ handle->efh_hi);
+ /* Fall through */
+ case MC_CMD_FILTER_OP_IN_OP_INSERT:
+ case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,
+ spec->efs_match_flags);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,
+ spec->efs_dmaq_id);
+ if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,
+ spec->efs_rss_context);
+ }
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,
+ spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+
+ if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
+ /*
+ * NOTE: Unlike most MCDI requests, the filter fields
+ * are presented in network (big endian) byte order.
+ */
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),
+ spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),
+ spec->efs_loc_mac, EFX_MAC_ADDR_LEN);
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,
+ __CPU_TO_BE_16(spec->efs_rem_port));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,
+ __CPU_TO_BE_16(spec->efs_loc_port));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,
+ __CPU_TO_BE_16(spec->efs_ether_type));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,
+ __CPU_TO_BE_16(spec->efs_inner_vid));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,
+ __CPU_TO_BE_16(spec->efs_outer_vid));
+
+ /* IP protocol (in low byte, high byte is zero) */
+ MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,
+ spec->efs_ip_proto);
+
+ EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),
+ &spec->efs_rem_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),
+ &spec->efs_loc_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);
+ handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_delete(
+ __in efx_nic_t *enp,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REMOVE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ break;
+ case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
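+/*
+ * Compare two filter specs on their match fields only; the delivery
+ * destination is compared separately by ef10_filter_same_dest().
+ */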
+static __checkReturn boolean_t
+ef10_filter_equal(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ /* FIXME: Consider rx vs tx filters (look at efs_flags) */
+ if (left->efs_match_flags != right->efs_match_flags)
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host))
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host))
+ return (B_FALSE);
+ if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (left->efs_rem_port != right->efs_rem_port)
+ return (B_FALSE);
+ if (left->efs_loc_port != right->efs_loc_port)
+ return (B_FALSE);
+ if (left->efs_inner_vid != right->efs_inner_vid)
+ return (B_FALSE);
+ if (left->efs_outer_vid != right->efs_outer_vid)
+ return (B_FALSE);
+ if (left->efs_ether_type != right->efs_ether_type)
+ return (B_FALSE);
+ if (left->efs_ip_proto != right->efs_ip_proto)
+ return (B_FALSE);
+
+ return (B_TRUE);
+
+}
+
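+/*
+ * Two filters deliver to the same destination if they both use the same
+ * RSS context, or neither uses RSS and both deliver to the same RXQ.
+ */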
+static __checkReturn boolean_t
+ef10_filter_same_dest(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_rss_context == right->efs_rss_context)
+ return (B_TRUE);
+ } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) &&
+ (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_dmaq_id == right->efs_dmaq_id)
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+static __checkReturn uint32_t
+ef10_filter_hash(
+ __in efx_filter_spec_t *spec)
+{
+ EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))
+ == 0);
+ EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %
+ sizeof (uint32_t)) == 0);
+
+ /*
+ * As the area of the efx_filter_spec_t we need to hash is DWORD
+ * aligned and an exact number of DWORDs in size, we can use the
+ * optimised efx_hash_dwords() rather than efx_hash_bytes().
+ */
+ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid,
+ (sizeof (efx_filter_spec_t) -
+ EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /
+ sizeof (uint32_t), 0));
+}
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static __checkReturn boolean_t
+ef10_filter_is_exclusive(
+ __in efx_filter_spec_t *spec)
+{
+ if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) &&
+ !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+ return (B_TRUE);
+
+ if ((spec->efs_match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) &&
+ ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe))
+ return (B_TRUE);
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) &&
+ (spec->efs_loc_host.eo_u8[0] != 0xff))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
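+/*
+ * Reinsert all filters in the software table into the hardware filter
+ * table, e.g. after an MC reboot has discarded the hardware filters.
+ */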
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp)
+{
+ int tbl_id;
+ efx_filter_spec_t *spec;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ boolean_t restoring;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ spec = ef10_filter_entry_spec(eftp, tbl_id);
+ if (spec == NULL) {
+ restoring = B_FALSE;
+ } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) {
+ /* Ignore busy entries. */
+ restoring = B_FALSE;
+ } else {
+ ef10_filter_set_entry_busy(eftp, tbl_id);
+ restoring = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (restoring == B_FALSE)
+ continue;
+
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail1;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ ef10_filter_set_entry_not_busy(eftp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * An arbitrary search limit for the software hash table, as used in the
+ * Linux net driver.
+ */
+#define EF10_FILTER_SEARCH_LIMIT 200
+
+static __checkReturn efx_rc_t
+ef10_filter_add_internal(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace,
+ __out_opt uint32_t *filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ uint32_t hash;
+ unsigned int depth;
+ int ins_index;
+ boolean_t replacing = B_FALSE;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ hash = ef10_filter_hash(spec);
+
+ /*
+ * FIXME: Add support for inserting filters of different priorities
+ * and removing lower priority multicast filters (bug 42378)
+ */
+
+ /*
+ * Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ ins_index = -1;
+ depth = 1;
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ for (;;) {
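+ /*
+ * Linear probe from the hash bucket; the table size is a
+ * power of two, so masking wraps the index.
+ */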
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(eftp, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0) {
+ ins_index = i;
+ }
+ } else if (ef10_filter_equal(spec, saved_spec)) {
+ if (ef10_filter_entry_is_busy(eftp, i))
+ break;
+ if (saved_spec->efs_priority
+ == EFX_FILTER_PRI_AUTO) {
+ ins_index = i;
+ goto found;
+ } else if (ef10_filter_is_exclusive(spec)) {
+ if (may_replace) {
+ ins_index = i;
+ goto found;
+ } else {
+ rc = EEXIST;
+ goto fail1;
+ }
+ }
+
+ /* Leave existing */
+ }
+
+ /*
+ * Once we reach the maximum search depth, use
+ * the first suitable slot or return EBUSY if
+ * there was none.
+ */
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+ goto found;
+ }
+ depth++;
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+ }
+
+found:
+ /*
+ * Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = ef10_filter_entry_spec(eftp, ins_index);
+ if (saved_spec) {
+ if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
+ /* This is a filter we are refreshing */
+ ef10_filter_set_entry_not_auto_old(eftp, ins_index);
+ goto out_unlock;
+
+ }
+ replacing = B_TRUE;
+ } else {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
+ if (!saved_spec) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+ *saved_spec = *spec;
+ ef10_filter_set_entry(eftp, ins_index, saved_spec);
+ }
+ ef10_filter_set_entry_busy(eftp, ins_index);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ /*
+ * When replacing a filter, the handle may change after a successful
+ * replace operation.
+ */
+ if (replacing) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail4;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->efs_priority = spec->efs_priority;
+ saved_spec->efs_flags = spec->efs_flags;
+ saved_spec->efs_rss_context = spec->efs_rss_context;
+ saved_spec->efs_dmaq_id = spec->efs_dmaq_id;
+ }
+
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+
+out_unlock:
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ if (filter_id)
+ *filter_id = ins_index;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ if (!replacing) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);
+ saved_spec = NULL;
+ }
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+ ef10_filter_set_entry(eftp, ins_index, NULL);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+
+ rc = ef10_filter_add_internal(enp, spec, may_replace, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_delete_internal(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *spec;
+ efsys_lock_state_t state;
+ uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS;
+
+ /*
+ * Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ *
+ * FIXME: What if the busy flag is never cleared?
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ while (ef10_filter_entry_is_busy(table, filter_idx)) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_SPIN(1);
+ EFSYS_LOCK(enp->en_eslp, state);
+ }
+ if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) {
+ ef10_filter_set_entry_busy(table, filter_idx);
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (spec == NULL) {
+ rc = ENOENT;
+ goto fail1;
+ }
+
+ /*
+ * Try to remove the hardware filter. This may fail if the MC has
+ * rebooted (which frees all hardware filter resources).
+ */
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE,
+ &table->eft_entry[filter_idx].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE,
+ &table->eft_entry[filter_idx].efe_handle);
+ }
+
+ /* Free the software table entry */
+ EFSYS_LOCK(enp->en_eslp, state);
+ ef10_filter_set_entry_not_busy(table, filter_idx);
+ ef10_filter_set_entry(table, filter_idx, NULL);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
+
+ /* Check result of hardware filter removal */
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ unsigned int hash;
+ unsigned int depth;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ hash = ef10_filter_hash(spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ depth = 1;
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(table, i);
+ if (saved_spec && ef10_filter_equal(spec, saved_spec) &&
+ ef10_filter_same_dest(spec, saved_spec)) {
+ break;
+ }
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ depth++;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ rc = ef10_filter_delete_internal(enp, i);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_parser_disp_info(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+ size_t matches_count;
+ size_t list_size;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ matches_count = MCDI_OUT_DWORD(req,
+ GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *list_lengthp = matches_count;
+
+ if (buffer_length < matches_count) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ /*
+ * Check that the elements of the list in the MCDI response are the size
+ * we expect, so we can just copy them directly. Any conversion of the
+ * flags is handled by the caller.
+ */
+ EFX_STATIC_ASSERT(sizeof (uint32_t) ==
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
+
+ list_size = matches_count *
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN;
+ memcpy(buffer,
+ MCDI_OUT2(req, uint32_t,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),
+ list_size);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+
+ size_t mcdi_list_length;
+ size_t list_length;
+ uint32_t i;
+ efx_rc_t rc;
+ uint32_t all_filter_flags =
+ (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT |
+ EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID |
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST |
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST);
+
+ rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length,
+ &mcdi_list_length);
+ if (rc != 0) {
+ if (rc == ENOSPC) {
+ /* Pass through mcdi_list_length for the list length */
+ *list_lengthp = mcdi_list_length;
+ }
+ goto fail1;
+ }
+
+ /*
+ * The static assertions in ef10_filter_init() ensure that the values of
+ * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't
+ * need to be converted.
+ *
+ * In case support is added to MCDI for additional flags, remove any
+ * matches from the list which include flags we don't support. The order
+ * of the matches is preserved as they are ordered from highest to
+ * lowest priority.
+ */
+ EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ list_length = 0;
+ for (i = 0; i < mcdi_list_length; i++) {
+ if ((buffer[i] & ~all_filter_flags) == 0) {
+ buffer[list_length] = buffer[i];
+ list_length++;
+ }
+ }
+
+ *list_lengthp = list_length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_unicast(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *addr,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the filter for the local station address */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_unicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown unicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_uc_def(&spec);
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
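+/*
+ * Insert a filter for each address in the multicast list, plus the
+ * broadcast address if requested. If rollback is requested, remove any
+ * filters already inserted when an insertion fails.
+ */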
+static __checkReturn efx_rc_t
+ef10_filter_insert_multicast_list(
+ __in efx_nic_t *enp,
+ __in boolean_t mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count,
+ __in efx_filter_flags_t filter_flags,
+ __in boolean_t rollback)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ uint8_t addr[6];
+ uint32_t i;
+ uint32_t filter_index;
+ uint32_t filter_count;
+ efx_rc_t rc;
+
+ if (mulcst == B_FALSE)
+ count = 0;
+
+ if (count + (brdcst ? 1 : 0) >
+ EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
+ /* Too many MAC addresses */
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Insert/renew multicast address list filters */
+ filter_count = 0;
+ for (i = 0; i < count; i++) {
+ efx_filter_spec_init_rx(&spec,
+ EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC,
+ &addrs[i * EFX_MAC_ADDR_LEN]);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
+
+ }
+
+ if (brdcst == B_TRUE) {
+ /* Insert/renew broadcast address filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ EFX_MAC_BROADCAST_ADDR_SET(addr);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
+ }
+
+ eftp->eft_mulcst_filter_count = filter_count;
+ eftp->eft_using_all_mulcst = B_FALSE;
+
+ return (0);
+
+rollback:
+ /* Remove any filters we have inserted */
+ i = filter_count;
+ while (i--) {
+ (void) ef10_filter_delete_internal(enp,
+ eftp->eft_mulcst_filter_indexes[i]);
+ }
+ eftp->eft_mulcst_filter_count = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_multicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown multicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_mc_def(&spec);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_mulcst_filter_indexes[0]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_mulcst_filter_count = 1;
+ eftp->eft_using_all_mulcst = B_TRUE;
+
+ /*
+ * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
+ */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
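+/* Remove filters marked AUTO_OLD, i.e. automatic filters not renewed */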
+static void
+ef10_filter_remove_old(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ (void) ef10_filter_delete_internal(enp, i);
+ }
+ }
+}
+
+
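+/*
+ * Query the firmware workaround state to determine whether chained
+ * multicast filters (the BUG26807 workaround) are enabled.
+ */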
+static __checkReturn efx_rc_t
+ef10_filter_get_workarounds(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t implemented = 0;
+ uint32_t enabled = 0;
+ efx_rc_t rc;
+
+ rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);
+ if (rc == 0) {
+ /* Check if chained multicast filter support is enabled */
+ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807)
+ encp->enc_bug26807_workaround = B_TRUE;
+ else
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if (rc == ENOTSUP) {
+ /*
+ * Firmware is too old to support GET_WORKAROUNDS, and support
+ * for this workaround was implemented later.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+
+/*
+ * Reconfigure all filters.
+ * If the all_unicst and/or all_mulcst filters cannot be applied, then
+ * return ENOTSUP (note that the filters for the specified addresses are
+ * still applied in this case).
+ */
+ __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_flags_t filter_flags;
+ unsigned int i;
+ efx_rc_t all_unicst_rc = 0;
+ efx_rc_t all_mulcst_rc = 0;
+ efx_rc_t rc;
+
+ if (table->eft_default_rxq == NULL) {
+ /*
+ * Filters direct traffic to the default RXQ, and so cannot be
+ * inserted until it is available. Any currently configured
+ * filters must be removed (ignore errors in case the MC
+ * has rebooted, which removes hardware filters).
+ */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ table->eft_unicst_filter_count = 0;
+
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+ table->eft_mulcst_filter_count = 0;
+
+ return (0);
+ }
+
+ if (table->eft_using_rss)
+ filter_flags = EFX_FILTER_FLAG_RX_RSS;
+ else
+ filter_flags = 0;
+
+ /* Mark old filters which may need to be removed */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+
+ /*
+ * Insert or renew unicast filters.
+ *
+ * Firmware does not perform chaining on unicast filters. As traffic is
+ * therefore only delivered to the first matching filter, we should
+ * always insert the specific filter for our MAC address, to try and
+ * ensure we get that traffic.
+ *
+ * (If the filter for our MAC address has already been inserted by
+ * another function, we won't receive traffic sent to us, even if we
+ * insert a unicast mismatch filter. To prevent traffic stealing, this
+ * therefore relies on the privilege model only allowing functions to
+ * insert filters for their own MAC address unless explicitly given
+ * additional privileges by the user. This also means that, even on a
+ * privileged function, inserting a unicast mismatch filter may not
+ * catch all traffic in multi PCI function scenarios.)
+ */
+ table->eft_unicst_filter_count = 0;
+ rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
+ if (all_unicst || (rc != 0)) {
+ all_unicst_rc = ef10_filter_insert_all_unicast(enp,
+ filter_flags);
+ if ((rc != 0) && (all_unicst_rc != 0))
+ goto fail1;
+ }
+
+ /*
+ * WORKAROUND_BUG26807 controls firmware support for chained multicast
+ * filters, and can only be enabled or disabled when the hardware filter
+ * table is empty.
+ *
+ * Chained multicast filters require support from the datapath firmware,
+ * and may not be available (e.g. low-latency variants or old Huntington
+ * firmware).
+ *
+ * Firmware will reset (FLR) functions which have inserted filters in
+ * the hardware filter table when the workaround is enabled/disabled.
+ * Functions without any hardware filters are not reset.
+ *
+ * Re-check if the workaround is enabled after adding unicast hardware
+ * filters. This ensures that encp->enc_bug26807_workaround matches the
+ * firmware state, and that later changes to enable/disable the
+ * workaround will result in this function seeing a reset (FLR).
+ *
+ * In common-code drivers, we only support multiple PCI function
+ * scenarios with firmware that supports multicast chaining, so we can
+ * assume it is enabled for such cases and hence simplify the filter
+ * insertion logic. Firmware that does not support multicast chaining
+ * does not support multiple PCI function configurations either, so
+ * filter insertion is much simpler and the same strategies can still be
+ * used.
+ */
+ if ((rc = ef10_filter_get_workarounds(enp)) != 0)
+ goto fail2;
+
+ if ((table->eft_using_all_mulcst != all_mulcst) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is enabled, so traffic that matches
+ * more than one multicast filter will be replicated and
+ * delivered to multiple recipients. To avoid this duplicate
+ * delivery, remove old multicast filters before inserting new
+ * multicast filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ /* Insert or renew multicast filters */
+ if (all_mulcst == B_TRUE) {
+ /*
+ * Insert the all multicast filter. If that fails, try to insert
+ * all of our multicast filters (but without rollback on
+ * failure).
+ */
+ all_mulcst_rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (all_mulcst_rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp, B_TRUE,
+ brdcst, addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail3;
+ }
+ } else {
+ /*
+ * Insert filters for multicast addresses.
+ * If any insertion fails, then rollback and try to insert the
+ * all multicast filter instead.
+ * If that also fails, try to insert all of the multicast
+ * filters (but without rollback on failure).
+ */
+ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
+ addrs, count, filter_flags, B_TRUE);
+ if (rc != 0) {
+ if ((table->eft_using_all_mulcst == B_FALSE) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is on, so remove
+ * old filters before inserting the multicast
+ * all filter to avoid duplicate delivery caused
+ * by packets matching multiple filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp,
+ mulcst, brdcst,
+ addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail4;
+ }
+ }
+ }
+
+ /* Remove old filters which were not renewed */
+ ef10_filter_remove_old(enp);
+
+ /* Report if any optional flags were rejected */
+ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
+ ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {
+ rc = ENOTSUP;
+ }
+
+ return (rc);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Clear auto old flags */
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ ef10_filter_set_entry_not_auto_old(table, i);
+ }
+ }
+
+ return (rc);
+}
+
+ void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ *erpp = table->eft_default_rxq;
+ *using_rss = table->eft_using_rss;
+}
+
+
+ void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+#if EFSYS_OPT_RX_SCALE
+ EFSYS_ASSERT((using_rss == B_FALSE) ||
+ (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID));
+ table->eft_using_rss = using_rss;
+#else
+ EFSYS_ASSERT(using_rss == B_FALSE);
+ table->eft_using_rss = B_FALSE;
+#endif
+ table->eft_default_rxq = erp;
+}
+
+ void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ table->eft_default_rxq = NULL;
+ table->eft_using_rss = B_FALSE;
+}
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_impl.h b/src/seastar/dpdk/drivers/net/sfc/base/ef10_impl.h
new file mode 100644
index 00000000..8c3dffee
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_impl.h
@@ -0,0 +1,1183 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EF10_IMPL_H
+#define _SYS_EF10_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
+#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
+#elif EFSYS_OPT_HUNTINGTON
+#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS
+#elif EFSYS_OPT_MEDFORD
+#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
+#endif
+
+/*
+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
+ * possibly be increased, or the write size reported by newer firmware used
+ * instead.
+ */
+#define EF10_NVRAM_CHUNK 0x80
+
+/*
+ * Alignment requirement for the value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary.
+ */
+#define EF10_RX_WPTR_ALIGN 8
+
+/*
+ * Maximum byte offset into the packet at which the TCP header must start
+ * for the hardware to be able to parse the packet correctly.
+ */
+#define EF10_TCP_HEADER_OFFSET_LIMIT 208
+
+/* Invalid RSS context handle */
+#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
+
+
+/* EV */
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in boolean_t packed_stream);
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label);
+
+/* INTR */
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp);
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp);
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+ef10_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp);
+
+
+/* MAC */
+
+extern __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+
+/* MCDI */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *resultp);
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_NVRAM */
+
+
+/* PHY */
+
+typedef struct ef10_link_state_s {
+ uint32_t els_adv_cap_mask;
+ uint32_t els_lp_cap_mask;
+ unsigned int els_fcntl;
+ efx_link_mode_t els_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t els_loopback;
+#endif
+ boolean_t els_mac_up;
+} ef10_link_state_t;
+
+extern void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+/* TX */
+
+extern __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_tx_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+extern void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+extern void
+ef10_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t vlan_tci,
+ __out efx_desc_t *edp);
+
+
+#if EFSYS_OPT_QSTATS
+
+extern void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+typedef uint32_t efx_piobuf_handle_t;
+
+#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index);
+
+
+/* VPD */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+extern __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#if EFSYS_OPT_RX_SCALE
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+extern void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_FILTER
+
+typedef struct ef10_filter_handle_s {
+ uint32_t efh_lo;
+ uint32_t efh_hi;
+} ef10_filter_handle_t;
+
+typedef struct ef10_filter_entry_s {
+ uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
+ ef10_filter_handle_t efe_handle;
+} ef10_filter_entry_t;
+
+/*
+ * BUSY flag indicates that an update is in progress.
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1U
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
+#define EFX_EF10_FILTER_FLAGS 3U
+
+/*
+ * Size of the hash table used by the driver. Doesn't need to be the
+ * same size as the hardware's table.
+ */
+#define EFX_EF10_FILTER_TBL_ROWS 8192
+
+/* Only need to allow for one directed and one unknown unicast filter */
+#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2
+
+/* Allow for the broadcast address to be added to the multicast list */
+#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
+
+typedef struct ef10_filter_table_s {
+ ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
+ efx_rxq_t *eft_default_rxq;
+ boolean_t eft_using_rss;
+ uint32_t eft_unicst_filter_indexes[
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
+ uint32_t eft_unicst_filter_count;
+ uint32_t eft_mulcst_filter_indexes[
+ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
+ uint32_t eft_mulcst_filter_count;
+ boolean_t eft_using_all_mulcst;
+} ef10_filter_table_t;
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+extern void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss);
+
+extern void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp);
+
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp);
+
+extern __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/* Data space per credit in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16)
+
+/*
+ * Received packets are always aligned on this boundary, and there is
+ * always a gap of this size between packets.
+ * (see SF-112241-TC, 4.5)
+ */
+#define EFX_RX_PACKED_STREAM_ALIGNMENT 64
+
+/*
+ * Size of a pseudo-header prepended to received packets
+ * in packed stream mode
+ */
+#define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8
+
+/* Minimum space for packet in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \
+ P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \
+ EFX_MAC_PDU_MIN + \
+ EFX_RX_PACKED_STREAM_ALIGNMENT, \
+ EFX_RX_PACKED_STREAM_ALIGNMENT)
+
+/* Maximum number of credits */
+#define EFX_RX_PACKED_STREAM_MAX_CREDITS 127
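+
+/*
+ * Note (derived from the definitions above, for illustration): every
+ * received packet starts on a 64 byte boundary and is separated from the
+ * next by a 64 byte gap, so each packet consumes a whole number of
+ * 64 byte chunks of the 64KB of buffer space covered by a single credit.
+ */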
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EF10_IMPL_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_intr.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_intr.c
new file mode 100644
index 00000000..16be3d8c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_intr.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ _NOTE(ARGUNUSED(enp, type, esmp))
+ return (0);
+}
+
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_trigger_interrupt(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+ MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (level >= enp->en_nic_cfg.enc_intr_limit) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_bug41750_workaround) {
+ /*
+ * bug 41750: Test interrupts don't work on Greenport
+ * bug 50084: Test interrupts don't work on VFs
+ */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_dword_t dword;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read the queue mask and implicitly acknowledge the interrupt. */
+ EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ _NOTE(ARGUNUSED(enp, message))
+
+ /* EF10 fatal errors are reported via events */
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ /* EF10 fatal errors are reported via events */
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_mac.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_mac.c
new file mode 100644
index 00000000..488633f5
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_mac.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_fcntl = els.els_fcntl;
+
+ *link_modep = els.els_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ /*
+ * Because EF10 doesn't *require* polling, we can't rely on
+ * ef10_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ *mac_upp = els.els_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the
+ * MAC address; the address field in MC_CMD_SET_MAC has no
+ * effect.
+ * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and
+ * the port to have no filters or queues active.
+ */
+static __checkReturn efx_rc_t
+efx_mcdi_vadapter_set_mac(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+ MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ enp->en_vport_id);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR),
+ epp->ep_mac_addr);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /*
+ * Fallback for older Huntington firmware without Vadapter
+ * support.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ /* Only configure the MTU in this call to MC_CMD_SET_MAC */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,
+ SET_MAC_EXT_IN_CFG_MTU, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_get(
+ __in efx_nic_t *enp,
+ __out size_t *mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;
+
+ /*
+ * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the
+ * MTU. This should always be supported on Medford, but it is not
+ * supported on older Huntington firmware.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_enhanced_set_mac_supported) {
+ if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)
+ goto fail1;
+ } else {
+ /*
+ * Fallback for older Huntington firmware, which always
+ * configures all of the parameters passed to MC_CMD_SET_MAC. This
+ * isn't suitable for setting the MTU on unprivileged functions.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+
+ /*
+ * Note: The Huntington MAC does not support REJECT_BRDCST.
+ * The REJECT_UNCST flag will also prevent multicast traffic
+ * from reaching the filters. As Huntington filters drop any
+ * traffic that does not match a filter, it is OK to leave the
+ * MAC running in promiscuous mode. See bug41141.
+ *
+ * FIXME: Does REJECT_UNCST behave the same way on Medford?
+ */
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, 0,
+ SET_MAC_IN_REJECT_BRDCST, 0);
+
+ /*
+ * Flow control, whether it is auto-negotiated or not,
+ * is set via the PHY advertised capabilities. When set to
+ * automatic the MAC will use the PHY settings to determine
+ * the flow control settings.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO);
+
+ /* Do not include the Ethernet frame checksum in RX packets */
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS, 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /*
+ * Unprivileged functions cannot control link state,
+ * but still need to configure filters.
+ */
+ if (req.emr_rc != EACCES) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ /*
+ * Apply the filters for the MAC configuration.
+ * If the NIC isn't ready to accept filters, this may
+ * return success without setting anything.
+ */
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rxq_t *old_rxq;
+ boolean_t old_using_rss;
+ efx_rc_t rc;
+
+ ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss);
+
+ ef10_filter_default_rxq_set(enp, erp, using_rss);
+
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss);
+
+ return (rc);
+}
+
+ void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ ef10_filter_default_rxq_clear(enp);
+
+ efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+}
+
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on EF10 */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range ef10_common[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS },
+ { EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_JABBER_PKTS },
+ { EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_PAUSE_PKTS },
+ };
+ const struct efx_mac_stats_range ef10_tx_size_bins[] = {
+ { EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_GE_15XX_PKTS },
+ };
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_common, EFX_ARRAY_SIZE(ef10_common))) != 0)
+ goto fail1;
+
+ if (epp->ep_phy_cap_mask & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
+ const struct efx_mac_stats_range ef10_40g_extra[] = {
+ { EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_ALIGN_ERRORS },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_40g_extra, EFX_ARRAY_SIZE(ef10_40g_extra))) != 0)
+ goto fail2;
+
+ if (encp->enc_mac_stats_40g_tx_size_bins) {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp,
+ mask_size, ef10_tx_size_bins,
+ EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail3;
+ }
+ } else {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail4;
+ }
+
+ if (encp->enc_pm_and_rxdp_counters) {
+ const struct efx_mac_stats_range ef10_pm_and_rxdp[] = {
+ { EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_RXDP_HLB_WAIT },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_pm_and_rxdp, EFX_ARRAY_SIZE(ef10_pm_and_rxdp))) != 0)
+ goto fail5;
+ }
+
+ if (encp->enc_datapath_cap_evb) {
+ const struct efx_mac_stats_range ef10_vadaptor[] = {
+ { EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_OVERFLOW },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_vadaptor, EFX_ARRAY_SIZE(ef10_vadaptor))) != 0)
+ goto fail6;
+ }
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#define EF10_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
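+
+/*
+ * The MAC statistics DMA buffer is an array of 64-bit counters indexed by
+ * the MC_CMD_MAC_* values, so the macro above simply converts a counter
+ * index into a byte offset within that buffer.
+ */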
+
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ /* Packet memory (EF10 only) */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value);
+
+ /* RX datapath */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value);
+
+
+ /* VADAPTER RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value);
+
+ /* VADAPTER TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value);
+
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /*
+  * Check that the MC did not rewrite the stats buffer while we were
+  * reading it: the generation words will differ if we raced with a
+  * DMA update. (Note: this may not be a sufficient check.)
+  */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_mcdi.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_mcdi.c
new file mode 100644
index 00000000..5a26bda2
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_mcdi.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_MCDI
+
+#ifndef WITH_MCDI_V2
+#error "WITH_MCDI_V2 required for EF10 MCDIv2 commands."
+#endif
+
+
+ __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
+
+ /*
+ * All EF10 firmware supports MCDIv2 and MCDIv1.
+ * Medford BootROM supports MCDIv2 and MCDIv1.
+ * Huntington BootROM supports MCDIv1 only.
+ */
+ emip->emi_max_version = 2;
+
+ /* A host DMA buffer is required for EF10 MCDI */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ensure that the MC doorbell is in a known state before issuing MCDI
+ * commands. The recovery algorithm requires the MC command buffer
+ * to be 256-byte aligned. See bug24769.
+ */
+ if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+
+ /* Save initial MC reboot status */
+ (void) ef10_mcdi_poll_reboot(enp);
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ efx_mcdi_new_epoch(enp);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+
+ emip->emi_new_epoch = B_FALSE;
+}
+
+/*
+ * In older firmware all commands are processed in a single thread, so a
+ * long-running command for one PCIe function can block processing for another
+ * function (see bug 61269).
+ *
+ * In newer firmware that supports multithreaded MCDI processing, we can extend
+ * the timeout for long-running requests which we know firmware may choose to
+ * process in a background thread.
+ */
+#define EF10_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+#define EF10_MCDI_CMD_LONG_TIMEOUT_US (60 * 1000 * 1000)
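+/* (i.e. 10 seconds and 60 seconds respectively, expressed in microseconds) */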
+
+ void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ switch (emrp->emr_cmd) {
+ case MC_CMD_POLL_BIST:
+ case MC_CMD_NVRAM_ERASE:
+ case MC_CMD_LICENSING_V3:
+ case MC_CMD_NVRAM_UPDATE_FINISH:
+ if (encp->enc_fw_verified_nvram_update_required != B_FALSE) {
+ /*
+ * Potentially longer running commands, which firmware
+ * may choose to process in a background thread.
+ */
+ *timeoutp = EF10_MCDI_CMD_LONG_TIMEOUT_US;
+ break;
+ }
+ /* FALLTHRU */
+ default:
+ *timeoutp = EF10_MCDI_CMD_TIMEOUT_US;
+ break;
+ }
+}
+
+ void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Write the header */
+ for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)hdrp + pos);
+ EFSYS_MEM_WRITED(esmp, pos, &dword);
+ }
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);
+ }
+
+ /* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
+ EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);
+ EFSYS_PIO_WRITE_BARRIER();
+
+ /* Ring the doorbell to post the command DMA address to the MC */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+}
+
+ __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t hdr;
+
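+ /*
+  * The MC sets the response flag in the first dword of the response
+  * header once it has completed the request, so it is sufficient to
+  * poll that bit here.
+  */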
+ EFSYS_MEM_READD(esmp, 0, &hdr);
+ EFSYS_MEM_READ_BARRIER();
+
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ unsigned int pos;
+ efx_dword_t data;
+
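+ /*
+  * Copy the response out a dword at a time; the final, possibly
+  * partial, dword is truncated to the requested length.
+  */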
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFSYS_MEM_READD(esmp, offset + pos, &data);
+ memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ uint32_t old_status;
+ uint32_t new_status;
+ efx_rc_t rc;
+
+ old_status = emip->emi_mc_reboot_status;
+
+ /* Update MC reboot status word */
+ EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE);
+ new_status = dword.ed_u32[0];
+
+ /* MC has rebooted if the value has changed */
+ if (new_status != old_status) {
+ emip->emi_mc_reboot_status = new_status;
+
+ /*
+ * FIXME: Ignore detected MC REBOOT for now.
+ *
+ * The Siena support for checking for MC reboot from status
+ * flags is broken - see comments in siena_mcdi_poll_reboot().
+ * As the generic MCDI code is shared, the EF10 reboot
+ * detection suffers similar problems.
+ *
+ * Do not report an error when the boot status changes until
+ * this can be handled by common code drivers (and reworked to
+ * support Siena too).
+ */
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = EIO;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t privilege_mask = encp->enc_privilege_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Use privilege mask state at MCDI attach.
+ */
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ /*
+ * Admin privilege must be used prior to the introduction of
+ * the specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ /*
+ * Admin privilege was used prior to the introduction of
+ * the specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of change mac spoofing privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of mac spoofing TX privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_nic.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_nic.c
new file mode 100644
index 00000000..aac2679c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_nic.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /*
+ * Require only Modes and DefaultMode fields, unless the current mode
+ * was requested (CurrentMode field was added for Medford).
+ */
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ if ((current_modep != NULL) && (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+ if (current_modep != NULL) {
+ *current_modep = MCDI_OUT_DWORD(req,
+ GET_PORT_MODES_OUT_CURRENT_MODE);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ switch (port_mode) {
+ case TLV_PORT_MODE_10G:
+ bandwidth = 10000;
+ break;
+ case TLV_PORT_MODE_10G_10G:
+ bandwidth = 10000 * 2;
+ break;
+ case TLV_PORT_MODE_10G_10G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
+ bandwidth = 10000 * 4;
+ break;
+ case TLV_PORT_MODE_40G:
+ bandwidth = 40000;
+ break;
+ case TLV_PORT_MODE_40G_40G:
+ bandwidth = 40000 * 2;
+ break;
+ case TLV_PORT_MODE_40G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_40G:
+ bandwidth = 40000 + (10000 * 2);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_alloc(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+ VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+ enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_free(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CLOCK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+ if (*sys_freqp == 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
+ if (*dpcpu_freqp == 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (vec_basep != NULL)
+ *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+ if (pf_nvecp != NULL)
+ *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ if (vf_nvecp != NULL)
+ *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_vis(
+ __in efx_nic_t *enp,
+ __in uint32_t min_vi_count,
+ __in uint32_t max_vi_count,
+ __out uint32_t *vi_basep,
+ __out uint32_t *vi_countp,
+ __out uint32_t *vi_shiftp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (vi_countp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_VIS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+ *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+ /* Report VI_SHIFT if available (always zero for Huntington) */
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+ *vi_shiftp = 0;
+ else
+ *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_vis(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_FREE_VIS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ /* Ignore EALREADY (no allocated VIs, so nothing to free) */
+ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_piobuf(
+ __in efx_nic_t *enp,
+ __out efx_piobuf_handle_t *handlep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (handlep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_piobuf(
+ __in efx_nic_t *enp,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FREE_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_link_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_unlink_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_nic_alloc_piobufs(
+ __in efx_nic_t *enp,
+ __in uint32_t max_piobuf_count)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(max_piobuf_count, <=,
+ EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+
+ for (i = 0; i < max_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
+ goto fail1;
+
+ enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+ enp->en_arch.ef10.ena_piobuf_count++;
+ }
+
+ return;
+
+fail1:
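+ /* Roll back: free any piobufs allocated before the failure */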
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static void
+ef10_nic_free_piobufs(
+ __in efx_nic_t *enp)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+ __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+ uint32_t blk_per_buf;
+ uint32_t buf, blk;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(bufnump);
+ EFSYS_ASSERT(handlep);
+ EFSYS_ASSERT(blknump);
+ EFSYS_ASSERT(offsetp);
+ EFSYS_ASSERT(sizep);
+
+ if ((edcp->edc_pio_alloc_size == 0) ||
+ (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
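+ /* Scan the per-piobuf allocation bitmaps for the first free block */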
+ for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+ uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+ if (~(*map) == 0)
+ continue;
+
+ EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+ for (blk = 0; blk < blk_per_buf; blk++) {
+ if ((*map & (1u << blk)) == 0) {
+ *map |= (1u << blk);
+ goto done;
+ }
+ }
+ }
+ rc = ENOMEM;
+ goto fail2;
+
+done:
+ *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+ *bufnump = buf;
+ *blknump = blk;
+ *sizep = edcp->edc_pio_alloc_size;
+ *offsetp = blk * (*sizep);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Free a piobuf sub-allocated block */
+ __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum)
+{
+ uint32_t *map;
+ efx_rc_t rc;
+
+ if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+ (blknum >= (8 * sizeof (*map)))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+ if ((*map & (1u << blknum)) == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+ *map &= ~(1u << blknum);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+static __checkReturn efx_rc_t
+ef10_mcdi_get_pf_count(
+ __in efx_nic_t *enp,
+ __out uint32_t *pf_countp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
+ MC_CMD_GET_PF_COUNT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PF_COUNT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pf_countp = *MCDI_OUT(req, uint8_t,
+ MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
+
+ EFSYS_ASSERT(*pf_countp != 0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t flags;
+ uint32_t flags2;
+ uint32_t tso2nc;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL,
+ &flags2, &tso2nc)) != 0)
+ goto fail1;
+
+ if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
+ goto fail1;
+
+#define CAP_FLAG(flags1, field) \
+ ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+#define CAP_FLAG2(flags2, field) \
+ ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+ /*
+ * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+ * We only support the 14 byte prefix here.
+ */
+ if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ encp->enc_rx_prefix_size = 14;
+
+ /* Check if the firmware supports TSO */
+ encp->enc_fw_assisted_tso_enabled =
+ CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports FATSOv2 */
+ encp->enc_fw_assisted_tso_v2_enabled =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+
+ /* Get the number of TSO contexts (FATSOv2) */
+ encp->enc_fw_assisted_tso_v2_n_contexts =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;
+
+ /* Check if the firmware has vadapter/vport/vswitch support */
+ encp->enc_datapath_cap_evb =
+ CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports VLAN insertion */
+ encp->enc_hw_tx_insert_vlan_enabled =
+ CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports RX event batching */
+ encp->enc_rx_batching_enabled =
+ CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+
+ /*
+ * Even if batching isn't reported as supported, we may still get
+ * batched events (see bug 61153).
+ */
+ encp->enc_rx_batch_max = 16;
+
+ /* Check if the firmware supports disabling scatter on RXQs */
+ encp->enc_rx_disable_scatter_supported =
+ CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports packed stream mode */
+ encp->enc_rx_packed_stream_supported =
+ CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if the firmware supports configurable buffer sizes
+ * for packed stream mode (otherwise buffer size is 1Mbyte)
+ */
+ encp->enc_rx_var_packed_stream_supported =
+ CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports set mac with running filters */
+ encp->enc_allow_set_mac_with_installed_filters =
+ CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+ * specifying which parameters to configure.
+ */
+ encp->enc_enhanced_set_mac_supported =
+ CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if the firmware supports version 2 of MC_CMD_INIT_EVQ, which
+ * allows the firmware to choose the settings to use on an EVQ.
+ */
+ encp->enc_init_evq_v2_supported =
+ CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware-verified NVRAM updates must be used.
+ *
+ * The firmware trusted installer requires all NVRAM updates to use
+ * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
+ * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
+ * partition and report the result).
+ */
+ encp->enc_fw_verified_nvram_update_required =
+ CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware provides packet memory and Rx datapath
+ * counters.
+ */
+ encp->enc_pm_and_rxdp_counters =
+ CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if the 40G MAC hardware is capable of reporting
+ * statistics for Tx size bins.
+ */
+ encp->enc_mac_stats_40g_tx_size_bins =
+ CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
+
+#undef CAP_FLAG
+#undef CAP_FLAG2
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#define EF10_LEGACY_PF_PRIVILEGE_MASK \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
+
+#define EF10_LEGACY_VF_PRIVILEGE_MASK 0
+
+
+ __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t mask;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+ &mask)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /* Fallback for old firmware without privilege mask support */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ /* Assume PF has admin privilege */
+ mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+ } else {
+ /* VF is always unprivileged by default */
+ mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+ }
+ }
+
+ *maskp = mask;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Table of mapping schemes from port number to the number of the external
+ * connector on the board. The external numbering does not distinguish
+ * off-board separated outputs such as from multi-headed cables.
+ *
+ * The count of adjacent port numbers that map to each external port,
+ * and the offset in the numbering, are determined by the chip family
+ * and the current port mode.
+ *
+ * For the Huntington family, the current port mode cannot be discovered,
+ * so the mapping used is instead the last match in the table to the full
+ * set of port modes to which the NIC can be configured. Therefore the
+ * ordering of entries in the mapping table is significant.
+ */
+static struct {
+ efx_family_t family;
+ uint32_t modes_mask;
+ int32_t count;
+ int32_t offset;
+} __ef10_external_port_mappings[] = {
+ /* Supported modes with 1 output per external port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1,
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G),
+ 1,
+ 1
+ },
+ /* Supported modes with 2 outputs per external port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2,
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),
+ 2,
+ 1
+ },
+ /* Supported modes with 4 outputs per external port */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),
+ 4,
+ 1,
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
+ 4,
+ 2
+ },
+};
+
+ __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp)
+{
+ efx_rc_t rc;
+ int i;
+ uint32_t port_modes;
+ uint32_t matches;
+ uint32_t current;
+ int32_t count = 1; /* Default 1-1 mapping */
+ int32_t offset = 1; /* Default starting external port number */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
+ /*
+ * No current port mode information
+ * - infer mapping from available modes
+ */
+ if ((rc = efx_mcdi_get_port_modes(enp,
+ &port_modes, NULL)) != 0) {
+ /*
+ * No port mode information available
+ * - use default mapping
+ */
+ goto out;
+ }
+ } else {
+ /* Only need to scan the current mode */
+ port_modes = 1 << current;
+ }
+
+ /*
+ * Infer the internal port -> external port mapping from
+ * the possible port modes for this NIC.
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ if (__ef10_external_port_mappings[i].family !=
+ enp->en_family)
+ continue;
+ matches = (__ef10_external_port_mappings[i].modes_mask &
+ port_modes);
+ if (matches != 0) {
+ count = __ef10_external_port_mappings[i].count;
+ offset = __ef10_external_port_mappings[i].offset;
+ port_modes &= ~matches;
+ }
+ }
+
+ if (port_modes != 0) {
+ /* Some advertised modes are not supported */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+out:
+ /*
+ * Scale as required by the last matched mode, then convert to the
+ * correctly offset external numbering.
+ */
+ *external_portp = (uint8_t)((port / count) + offset);
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ if (rc != EACCES)
+ goto fail2;
+
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail4;
+
+ /*
+ * Set default driver config limits (based on board config).
+ *
+ * FIXME: For now allocate a fixed number of VIs which is likely to be
+ * sufficient and small enough to allow multiple functions on the same
+ * port.
+ */
+ edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+ MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+ /* The client driver must configure and enable PIO buffer support */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail5;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail6;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
+ /* Unprivileged functions do not have access to sensors */
+ if (rc != EACCES)
+ goto fail7;
+ }
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail7:
+ EFSYS_PROBE(fail7);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail6:
+ EFSYS_PROBE(fail6);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_evq_count, max_evq_count;
+ uint32_t min_rxq_count, max_rxq_count;
+ uint32_t min_txq_count, max_txq_count;
+ efx_rc_t rc;
+
+ if (edlp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get minimum required and maximum usable VI limits */
+ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+ min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+ min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_min_vi_count =
+ MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+ max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+ max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+ max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_max_vi_count =
+ MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+ /*
+ * Check limits for sub-allocated piobuf blocks.
+ * PIO is optional, so don't fail if the limits are incorrect.
+ */
+ if ((encp->enc_piobuf_size == 0) ||
+ (encp->enc_piobuf_limit == 0) ||
+ (edlp->edl_min_pio_alloc_size == 0) ||
+ (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+ /* Disable PIO */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+ } else {
+ uint32_t blk_size, blk_count, blks_per_piobuf;
+
+ blk_size =
+ MAX(edlp->edl_min_pio_alloc_size,
+ encp->enc_piobuf_min_alloc_size);
+
+ blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+ EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+ blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+ /* A zero max pio alloc count means unlimited */
+ if ((edlp->edl_max_pio_alloc_count > 0) &&
+ (edlp->edl_max_pio_alloc_count < blk_count)) {
+ blk_count = edlp->edl_max_pio_alloc_count;
+ }
+
+ edcp->edc_pio_alloc_size = blk_size;
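+ /* Round up to a whole number of piobufs */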
+ edcp->edc_max_piobuf_count =
+ (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ /* ef10_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ /* Clear RX/TX DMA queue errors */
+ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_vi_count, max_vi_count;
+ uint32_t vi_count, vi_base, vi_shift;
+ uint32_t i;
+ uint32_t retry;
+ uint32_t delay_us;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ /* Allocate (optional) on-chip PIO buffers */
+ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+ /*
+ * For best performance, PIO writes should use a write-combined
+ * (WC) memory mapping. Using a separate WC mapping for the PIO
+ * aperture of each VI would be a burden to drivers (and not
+ * possible if the host page size is >4Kbyte).
+ *
+ * To avoid this we use a single uncached (UC) mapping for VI
+ * register access, and a single WC mapping for extra VIs used
+ * for PIO writes.
+ *
+ * Each piobuf must be linked to a VI in the WC mapping, and to
+ * each VI that is using a sub-allocated block from the piobuf.
+ */
+ min_vi_count = edcp->edc_min_vi_count;
+ max_vi_count =
+ edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Ensure that the previously attached driver's VIs are freed */
+ if ((rc = efx_mcdi_free_vis(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+ * fails then retrying the request for fewer VI resources may succeed.
+ */
+ vi_count = 0;
+ if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+ &vi_base, &vi_count, &vi_shift)) != 0)
+ goto fail3;
+
+ EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+ if (vi_count < min_vi_count) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ enp->en_arch.ef10.ena_vi_base = vi_base;
+ enp->en_arch.ef10.ena_vi_count = vi_count;
+ enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+ if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+ /* Not enough extra VIs to map piobufs */
+ ef10_nic_free_piobufs(enp);
+ }
+
+ enp->en_arch.ef10.ena_pio_write_vi_base =
+ vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Save UC memory mapping details */
+ enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
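+ /* UC mapping covers only the normal VIs; extra VIs are WC mapped */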
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_pio_write_vi_base);
+ } else {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_vi_count);
+ }
+
+ /* Save WC memory mapping details */
+ enp->en_arch.ef10.ena_wc_mem_map_offset =
+ enp->en_arch.ef10.ena_uc_mem_map_offset +
+ enp->en_arch.ef10.ena_uc_mem_map_size;
+
+ enp->en_arch.ef10.ena_wc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_piobuf_count);
+
+ /* Link piobufs to extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_link_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i,
+ enp->en_arch.ef10.ena_piobuf_handle[i]);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ /*
+ * Allocate a vAdaptor attached to our upstream vPort/pPort.
+ *
+ * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+ * driver has yet to bring up the EVB port. See bug 56147. In this case,
+ * retry the request several times after waiting a while. The wait time
+ * between retries starts small (10ms) and exponentially increases.
+ * Total wait time is a little over two seconds. Retry logic in the
+ * client driver may mean this whole loop is repeated if it continues to
+ * fail.
+ */
+ retry = 0;
+ delay_us = 10000;
+ while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+ (rc != ENOENT)) {
+ /*
+ * Do not retry alloc for PF, or for other errors on
+ * a VF.
+ */
+ goto fail5;
+ }
+
+ /* VF startup before PF is ready. Retry allocation. */
+ if (retry > 5) {
+ /* Too many attempts */
+ rc = EINVAL;
+ goto fail6;
+ }
+ EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+ EFSYS_SLEEP(delay_us);
+ retry++;
+ if (delay_us < 500000)
+ delay_us <<= 2;
+ }
+
+ enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ ef10_nic_free_piobufs(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Report VIs that the client driver can use.
+ * Do not include VIs used for PIO buffer writes.
+ */
+ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * TODO: Specify host memory mapping alignment and granularity
+ * in efx_drv_limits_t so that they can be taken into account
+ * when allocating extra VIs for PIO writes.
+ */
+ switch (region) {
+ case EFX_REGION_VI:
+ /* UC mapped memory BAR region for VI registers */
+ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+ break;
+
+ case EFX_REGION_PIO_WRITE_VI:
+ /* WC mapped memory BAR region for piobuf writes */
+ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nic_fini(
+ __in efx_nic_t *enp)
+{
+ uint32_t i;
+ efx_rc_t rc;
+
+ (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+ enp->en_vport_id = 0;
+
+ /* Unlink piobufs from extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_unlink_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ ef10_nic_free_piobufs(enp);
+
+ (void) efx_mcdi_free_vis(enp);
+ enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+ void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(enp))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_nvram.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_nvram.c
new file mode 100644
index 00000000..3f9d3750
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_nvram.c
@@ -0,0 +1,2385 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+#include "ef10_tlv_layout.h"
+
+/* Cursor for TLV partition format */
+typedef struct tlv_cursor_s {
+ uint32_t *block; /* Base of data block */
+ uint32_t *current; /* Cursor position */
+ uint32_t *end; /* End tag position */
+ uint32_t *limit; /* Last dword of data block */
+} tlv_cursor_t;
+
+typedef struct nvram_partition_s {
+ uint16_t type;
+ uint8_t chip_select;
+ uint8_t flags;
+ /*
+ * The full length of the NVRAM partition.
+ * This is different from tlv_partition_header.total_length,
+ * which can be smaller.
+ */
+ uint32_t length;
+ uint32_t erase_size;
+ uint32_t *data;
+ tlv_cursor_t tlv_cursor;
+} nvram_partition_t;
+
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor);
+
+
+static void
+tlv_init_block(
+ __out uint32_t *block)
+{
+ *block = __CPU_TO_LE_32(TLV_TAG_END);
+}
+
+static uint32_t
+tlv_tag(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, tag;
+
+ dword = cursor->current[0];
+ tag = __LE_TO_CPU_32(dword);
+
+ return (tag);
+}
+
+static size_t
+tlv_length(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, length;
+
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (0);
+
+ dword = cursor->current[1];
+ length = __LE_TO_CPU_32(dword);
+
+ return ((size_t)length);
+}
+
+static uint8_t *
+tlv_value(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)(&cursor->current[2]));
+}
+
+static uint8_t *
+tlv_item(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)cursor->current);
+}
+
+/*
+ * TLV item DWORD length is tag + length + value (rounded up to DWORD);
+ * equivalent to tlv_n_words_for_len in mc-comms tlv.c.
+ */
+#define TLV_DWORD_COUNT(length) \
+ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t)))
+
+
+static uint32_t *
+tlv_next_item_ptr(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t length;
+
+ length = tlv_length(cursor);
+
+ return (cursor->current + TLV_DWORD_COUNT(length));
+}
+
+static __checkReturn efx_rc_t
+tlv_advance(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (cursor->current == cursor->end) {
+ /* No more tags after END tag */
+ cursor->current = NULL;
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ /* Advance to next item and validate */
+ cursor->current = tlv_next_item_ptr(cursor);
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_rewind(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ cursor->current = cursor->block;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_find(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag)
+{
+ efx_rc_t rc;
+
+ rc = tlv_rewind(cursor);
+ while (rc == 0) {
+ if (tlv_tag(cursor) == tag)
+ break;
+
+ rc = tlv_advance(cursor);
+ }
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ /* Check cursor position */
+ if (cursor->current < cursor->block) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (cursor->current > cursor->limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (tlv_tag(cursor) != TLV_TAG_END) {
+ /* Check current item has space for tag and length */
+ if (cursor->current > (cursor->limit - 2)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ /* Check we have value data for current item and another tag */
+ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_init_cursor(
+ __out tlv_cursor_t *cursor,
+ __in uint32_t *block,
+ __in uint32_t *limit,
+ __in uint32_t *current)
+{
+ cursor->block = block;
+ cursor->limit = limit;
+
+ cursor->current = current;
+ cursor->end = NULL;
+
+ return (tlv_validate_state(cursor));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_from_size(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size)
+{
+ uint32_t *limit;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ return (tlv_init_cursor(cursor, (uint32_t *)block,
+ limit, (uint32_t *)block));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_at_offset(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size,
+ __in size_t offset)
+{
+ uint32_t *limit;
+ uint32_t *current;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ current = (uint32_t *)(block + offset);
+ return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current));
+}
+
+static __checkReturn efx_rc_t
+tlv_require_end(
+ __inout tlv_cursor_t *cursor)
+{
+ uint32_t *pos;
+ efx_rc_t rc;
+
+ if (cursor->end == NULL) {
+ pos = cursor->current;
+ if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0)
+ goto fail1;
+
+ cursor->end = cursor->current;
+ cursor->current = pos;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static size_t
+tlv_block_length_used(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ /* Return space used (including the END tag) */
+ return ((cursor->end + 1 - cursor->block) * sizeof (uint32_t));
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static uint32_t *
+tlv_last_segment_end(
+ __in tlv_cursor_t *cursor)
+{
+ tlv_cursor_t segment_cursor;
+ uint32_t *last_segment_end = cursor->block;
+ uint32_t *segment_start = cursor->block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the pointer to its end tag.
+ */
+ for (;;) {
+ if (tlv_init_cursor(&segment_cursor, segment_start,
+ cursor->limit, segment_start) != 0)
+ break;
+ if (tlv_require_end(&segment_cursor) != 0)
+ break;
+ last_segment_end = segment_cursor.end;
+ segment_start = segment_cursor.end + 1;
+ }
+
+ return (last_segment_end);
+}
+
+
+static uint32_t *
+tlv_write(
+ __in tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size) uint8_t *data,
+ __in size_t size)
+{
+ uint32_t len = size;
+ uint32_t *ptr;
+
+ ptr = cursor->current;
+
+ *ptr++ = __CPU_TO_LE_32(tag);
+ *ptr++ = __CPU_TO_LE_32(len);
+
+ if (len > 0) {
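+ /* Zero the last value dword so that padding bytes end up zero */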
+ ptr[(len - 1) / sizeof (uint32_t)] = 0;
+ memcpy(ptr, data, len);
+ ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr);
+ }
+
+ return (ptr);
+}
+
+static __checkReturn efx_rc_t
+tlv_insert(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ if (tag == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ delta = TLV_DWORD_COUNT(size);
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Move data up: new space at cursor->current */
+ memmove(cursor->current + delta, cursor->current,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ /* Write new TLV item */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_delete(
+ __inout tlv_cursor_t *cursor)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ delta = TLV_DWORD_COUNT(tlv_length(cursor));
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail3;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ /* Shuffle things down, destroying the item at cursor->current */
+ memmove(cursor->current, cursor->current + delta,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t));
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_modify(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ uint32_t *pos;
+ unsigned int old_ndwords;
+ unsigned int new_ndwords;
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ if (tlv_tag(cursor) != tag) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor));
+ new_ndwords = TLV_DWORD_COUNT(size);
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail4;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ if (new_ndwords > old_ndwords) {
+ /* Expand space used for TLV item */
+ delta = new_ndwords - old_ndwords;
+ pos = cursor->current + old_ndwords;
+
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Move up: new space at (cursor->current + old_ndwords) */
+ memmove(pos + delta, pos,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ } else if (new_ndwords < old_ndwords) {
+ /* Shrink space used for TLV item */
+ delta = old_ndwords - new_ndwords;
+ pos = cursor->current + new_ndwords;
+
+ /* Move down: remove words at (cursor->current + new_ndwords) */
+ memmove(pos, pos + delta,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0,
+ delta * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+ }
+
+ /* Write new data */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t
+checksum_tlv_partition(
+ __in nvram_partition_t *partition)
+{
+ tlv_cursor_t *cursor;
+ uint32_t *ptr;
+ uint32_t *end;
+ uint32_t csum;
+ size_t len;
+
+ cursor = &partition->tlv_cursor;
+ len = tlv_block_length_used(cursor);
+ EFSYS_ASSERT3U((len & 3), ==, 0);
+
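+ /* Sum all dwords in the used portion of the block (len is in bytes) */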
+ csum = 0;
+ ptr = partition->data;
+ end = &ptr[len >> 2];
+
+ while (ptr < end)
+ csum += __LE_TO_CPU_32(*ptr++);
+
+ return (csum);
+}
+
+static __checkReturn efx_rc_t
+tlv_update_partition_len_and_cks(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+ nvram_partition_t partition;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t new_len;
+
+ /*
+ * We just modified the partition, so the total length may not be
+ * valid. Don't use tlv_find(), which performs some sanity checks
+ * that may fail here.
+ */
+ partition.data = cursor->block;
+ memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
+ header = (struct tlv_partition_header *)partition.data;
+ /* Sanity check. */
+ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ new_len = tlv_block_length_used(&partition.tlv_cursor);
+ if (new_len == 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ header->total_length = __CPU_TO_LE_32(new_len);
+ /* Ensure the modified partition always has a new generation count. */
+ header->generation = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(header->generation) + 1);
+
+ trailer = (struct tlv_partition_trailer *)((uint8_t *)header +
+ new_len - sizeof (*trailer) - sizeof (uint32_t));
+ trailer->generation = header->generation;
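+ /* Adjust the trailer checksum so the partition dwords sum to zero */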
+ trailer->checksum = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(trailer->checksum) -
+ checksum_tlv_partition(&partition));
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((partn_data == NULL) || (partn_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* The partition header must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data,
+ partn_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV partition length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > partn_size) {
+ rc = EFBIG;
+ goto fail4;
+ }
+
+ /* Check partition ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /* Check generation counts are consistent */
+ if (trailer->generation != header->generation) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ /* Verify partition checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(partn_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ uint32_t *buf = (uint32_t *)partn_data;
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header header;
+ struct tlv_partition_trailer trailer;
+
+ unsigned int min_buf_size = sizeof (struct tlv_partition_header) +
+ sizeof (struct tlv_partition_trailer);
+ if (partn_size < min_buf_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ memset(buf, 0xff, partn_size);
+
+ tlv_init_block(buf);
+ if ((rc = tlv_init_cursor(&cursor, buf,
+ (uint32_t *)((uint8_t *)buf + partn_size),
+ buf)) != 0) {
+ goto fail2;
+ }
+
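+ /* TLV item lengths exclude the 8-byte tag and length fields */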
+ header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER);
+ header.length = __CPU_TO_LE_32(sizeof (header) - 8);
+ header.type_id = __CPU_TO_LE_16(partn_type);
+ header.preset = 0;
+ header.generation = __CPU_TO_LE_32(1);
+ header.total_length = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(
+ &cursor, TLV_TAG_PARTITION_HEADER,
+ (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0)
+ goto fail3;
+ if ((rc = tlv_advance(&cursor)) != 0)
+ goto fail4;
+
+ trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER);
+ trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8);
+ trailer.generation = header.generation;
+ trailer.checksum = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER,
+ (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0)
+ goto fail5;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail6;
+
+ /* Check that the partition is valid. */
+ if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
+ partn_data, partn_size)) != 0)
+ goto fail7;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t
+byte_offset(
+ __in uint32_t *position,
+ __in uint32_t *base)
+{
+ return (uint32_t)((uint8_t *)position - (uint8_t *)base);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ /* Read past partition header to find start address of the first key */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ /* Read to end of partition */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+ uint32_t *segment_used;
+
+ _NOTE(ARGUNUSED(offset))
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ segment_used = cursor.block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the used space including that end tag.
+ */
+ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ if (tlv_require_end(&cursor) != 0) {
+ if (segment_used == cursor.block) {
+ /*
+ * The first segment is corrupt, so there is
+ * no valid data in the partition.
+ */
+ rc = EINVAL;
+ goto fail2;
+ }
+ break;
+ }
+ segment_used = cursor.end + 1;
+
+ cursor.current = segment_used;
+ }
+ /* Return space used (including the END tag) */
+ *endp = (segment_used - cursor.block) * sizeof (uint32_t);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ /* Find TLV at offset and return key start and length */
+ tlv_cursor_t cursor;
+ uint8_t *key;
+ uint32_t tag;
+
+ if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset) != 0) {
+ return (B_FALSE);
+ }
+
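+ /* Skip partition header/trailer items and return the first key found */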
+ while ((key = tlv_item(&cursor)) != NULL) {
+ tag = tlv_tag(&cursor);
+ if (tag == TLV_TAG_PARTITION_HEADER ||
+ tag == TLV_TAG_PARTITION_TRAILER) {
+ if (tlv_advance(&cursor) != 0) {
+ break;
+ }
+ continue;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ uint32_t item_length;
+
+ if (item_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail2;
+ }
+
+ item_length = tlv_length(&cursor);
+ if (length < item_length) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ memcpy(itemp, tlv_value(&cursor), item_length);
+
+ *lengthp = item_length;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
+
+ if (rc != 0) {
+ goto fail2;
+ }
+
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ _NOTE(ARGUNUSED(length, end))
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ if ((rc = tlv_delete(&cursor)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail2;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+/*
+ * Read and validate a segment from a partition. A segment is a complete
+ * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may
+ * be multiple segments in a partition, so seg_offset allows segments
+ * beyond the first to be read.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_read_tlv_segment(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in size_t seg_offset,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Read initial chunk of the segment, starting at offset */
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data,
+ EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) {
+ goto fail2;
+ }
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > max_seg_size) {
+ rc = EFBIG;
+ goto fail5;
+ }
+
+ /* Read the remaining segment content */
+ if (total_length > EF10_NVRAM_CHUNK) {
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn,
+ seg_offset + EF10_NVRAM_CHUNK,
+ seg_data + EF10_NVRAM_CHUNK,
+ total_length - EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0)
+ goto fail6;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Check data read from segment is consistent */
+ if (trailer->generation != header->generation) {
+ /*
+ * The partition data may have been modified between successive
+ * MCDI NVRAM_READ requests by the MC or another PCI function.
+ *
+ * The caller must retry to obtain consistent partition data.
+ */
+ rc = EAGAIN;
+ goto fail10;
+ }
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
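+
+/*
+ * Illustrative sketch (editorial addition, not part of the upstream file):
+ * the checksum rule used above is that all 32-bit words of the segment,
+ * from the PARTITION_HEADER through the END tag (total_length bytes, a
+ * whole number of 32-bit words), sum to zero modulo 2^32. A standalone
+ * equivalent of the inline check might look like this; the helper name is
+ * hypothetical.
+ */
+#if 0	/* example only; not compiled */
+static boolean_t
+example_tlv_segment_cksum_ok(
+	__in_bcount(total_length)	caddr_t seg_data,
+	__in				size_t total_length)
+{
+	uint32_t cksum = 0;
+	size_t pos;
+
+	for (pos = 0; pos < total_length; pos += sizeof (uint32_t))
+		cksum += *((uint32_t *)(seg_data + pos));
+
+	/* The PARTITION_TRAILER checksum is chosen to make this sum zero */
+	return ((cksum == 0) ? B_TRUE : B_FALSE);
+}
+#endif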
+
+/*
+ * Read a single TLV item from a host memory
+ * buffer containing a TLV formatted segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep)
+{
+ tlv_cursor_t cursor;
+ caddr_t data;
+ size_t length;
+ caddr_t value;
+ efx_rc_t rc;
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Find requested TLV tag in segment data */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if ((rc = tlv_find(&cursor, tag)) != 0) {
+ rc = ENOENT;
+ goto fail3;
+ }
+ value = (caddr_t)tlv_value(&cursor);
+ length = tlv_length(&cursor);
+
+ if (length == 0)
+ data = NULL;
+ else {
+ /* Copy out data from TLV item */
+ EFSYS_KMEM_ALLOC(enp->en_esip, length, data);
+ if (data == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memcpy(data, value, length);
+ }
+
+ *datap = data;
+ *sizep = length;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Read a single TLV item from the first segment in a TLV formatted partition */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap,
+ __out size_t *seg_sizep)
+{
+ caddr_t seg_data = NULL;
+ size_t partn_size = 0;
+ size_t length;
+ caddr_t data;
+ int retry;
+ efx_rc_t rc;
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ if (partn_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data);
+ if (seg_data == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /*
+ * Read the first segment in a TLV partition. Retry until consistent
+ * segment contents are returned. Inconsistent data may be read if:
+ * a) the segment contents are invalid
+ * b) the MC has rebooted while we were reading the partition
+ * c) the partition has been modified while we were reading it
+ * Limit retry attempts to ensure forward progress.
+ */
+ retry = 10;
+ do {
+ rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
+ seg_data, partn_size);
+ } while ((rc == EAGAIN) && (--retry > 0));
+
+ if (rc != 0) {
+ /* Failed to obtain consistent segment data */
+ goto fail4;
+ }
+
+ if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size,
+ tag, &data, &length)) != 0)
+ goto fail5;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+
+ *seg_datap = data;
+ *seg_sizep = length;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
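+
+/*
+ * Illustrative sketch (editorial addition, not part of the upstream file):
+ * a hypothetical caller of ef10_nvram_partn_read_tlv() owns the returned
+ * buffer and must release it with EFSYS_KMEM_FREE using the returned
+ * length. The function name and the use of the result are placeholders.
+ */
+#if 0	/* example only; not compiled */
+static efx_rc_t
+example_read_one_tag(
+	__in		efx_nic_t *enp,
+	__in		uint32_t partn,
+	__in		uint32_t tag)
+{
+	caddr_t data = NULL;
+	size_t size = 0;
+	efx_rc_t rc;
+
+	if ((rc = ef10_nvram_partn_read_tlv(enp, partn, tag,
+	    &data, &size)) != 0)
+		return (rc);
+
+	/* ... use data[0..size) here; data is NULL for a zero-length item */
+
+	if (data != NULL)
+		EFSYS_KMEM_FREE(enp->en_esip, size, data);
+
+	return (0);
+}
+#endif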
+
+/* Compute the size of a segment. */
+ static __checkReturn efx_rc_t
+ef10_nvram_buf_segment_size(
+ __in caddr_t seg_data,
+ __in size_t max_seg_size,
+ __out size_t *seg_sizep)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ uint32_t cksum;
+ int pos;
+ uint32_t *end_tag_position;
+ uint32_t segment_length;
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ *seg_sizep = __LE_TO_CPU_32(header->total_length);
+ if (*seg_sizep > max_seg_size) {
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ end_tag_position = cursor.current;
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /*
+ * Calculate total length from HEADER to END tags and compare to
+ * max_seg_size and the total_length field in the HEADER tag.
+ */
+ segment_length = tlv_block_length_used(&cursor);
+
+ if (segment_length > max_seg_size) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ if (segment_length != *seg_sizep) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Skip over the first HEADER tag. */
+ rc = tlv_rewind(&cursor);
+ rc = tlv_advance(&cursor);
+
+ while (rc == 0) {
+ if (tlv_tag(&cursor) == TLV_TAG_END) {
+ /* Check that the END tag is the one found earlier. */
+			if (cursor.current != end_tag_position) {
+				rc = EINVAL;
+				goto fail10;
+			}
+ break;
+ }
+ /* Check for duplicate HEADER tags before the END tag. */
+ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ rc = tlv_advance(&cursor);
+ }
+ if (rc != 0)
+ goto fail12;
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in a host memory buffer containing a TLV
+ * formatted segment. Historically partitions consisted of only one segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ uint32_t generation;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Update the TLV chain to contain the new data */
+ if ((rc = tlv_find(&cursor, tag)) == 0) {
+ /* Modify existing TLV item */
+ if ((rc = tlv_modify(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0)
+ goto fail3;
+ } else {
+ /* Insert a new TLV item before the PARTITION_TRAILER */
+ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER);
+ if (rc != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ if ((rc = tlv_insert(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ }
+
+ /* Find the trailer tag */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ /* Update PARTITION_HEADER and PARTITION_TRAILER fields */
+ *total_lengthp = tlv_block_length_used(&cursor);
+ if (*total_lengthp > max_seg_size) {
+ rc = ENOSPC;
+ goto fail7;
+ }
+ generation = __LE_TO_CPU_32(header->generation) + 1;
+
+ header->total_length = __CPU_TO_LE_32(*total_lengthp);
+ header->generation = __CPU_TO_LE_32(generation);
+ trailer->generation = __CPU_TO_LE_32(generation);
+
+ /* Recompute PARTITION_TRAILER checksum */
+ trailer->checksum = 0;
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
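+	/*
+	 * Note (editorial): ~cksum + 1 is the two's complement of cksum, so
+	 * after the store below the 32-bit word sum over the whole segment
+	 * wraps to zero, which is exactly what ef10_nvram_read_tlv_segment()
+	 * and ef10_nvram_buf_segment_size() verify.
+	 */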
+ trailer->checksum = ~cksum + 1;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in the first segment of a TLV formatted
+ * dynamic config partition. The first segment is the current active
+ * configuration.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data,
+ size, B_FALSE);
+}
+
+/*
+ * Read a segment from NVRAM at the given offset into a buffer (segment_data)
+ * and optionally write a new tag into it.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_segment_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout caddr_t *seg_datap,
+ __inout size_t *partn_offsetp,
+ __inout size_t *src_remain_lenp,
+ __inout size_t *dest_remain_lenp,
+ __in boolean_t write)
+{
+ efx_rc_t rc;
+ efx_rc_t status;
+ size_t original_segment_size;
+ size_t modified_segment_size;
+
+ /*
+	 * Read the segment from NVRAM into the segment_data buffer and
+	 * validate it. An invalid segment makes this function return an
+	 * error, which the caller treats as fatal only for the first segment
+	 * in the partition; in that case it must propagate the error.
+ */
+ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
+ *seg_datap, *src_remain_lenp);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ status = ef10_nvram_buf_segment_size(*seg_datap,
+ *src_remain_lenp, &original_segment_size);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (write) {
+ /* Update the contents of the segment in the buffer */
+ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap,
+ *dest_remain_lenp, tag, data, size,
+ &modified_segment_size)) != 0) {
+ goto fail3;
+ }
+ *dest_remain_lenp -= modified_segment_size;
+ *seg_datap += modified_segment_size;
+ } else {
+ /*
+ * We won't modify this segment, but still need to update the
+ * remaining lengths and pointers.
+ */
+ *dest_remain_lenp -= original_segment_size;
+ *seg_datap += original_segment_size;
+ }
+
+ *partn_offsetp += original_segment_size;
+ *src_remain_lenp -= original_segment_size;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in either the first segment or in all
+ * segments in a TLV formatted dynamic config partition. Dynamic config
+ * partitions on boards that support RFID are divided into a number of
+ * segments, each formatted like a partition, with header, trailer and end
+ * tags. The first segment is the current active configuration.
+ *
+ * The segments are initialised by manftest and each contain a different
+ * configuration e.g. firmware variant. The firmware can be instructed
+ * via RFID to copy a segment to replace the first segment, hence changing the
+ * active configuration. This allows ops to change the configuration of a board
+ * prior to shipment using RFID.
+ *
+ * Changes to the dynamic config may need to be written to all segments (e.g.
+ * firmware versions) or just the first segment (changes to the active
+ * configuration). See SF-111324-SW "The use of RFID in Solarflare Products".
+ * If only the first segment is written the code still needs to be aware of the
+ * possible presence of subsequent segments as writing to a segment may cause
+ * its size to increase, which would overwrite the subsequent segments and
+ * invalidate them.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments)
+{
+ size_t partn_size = 0;
+ caddr_t partn_data;
+ size_t total_length = 0;
+ efx_rc_t rc;
+ size_t current_offset = 0;
+ size_t remaining_original_length;
+ size_t remaining_modified_length;
+ caddr_t segment_data;
+
+ EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG);
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ remaining_original_length = partn_size;
+ remaining_modified_length = partn_size;
+ segment_data = partn_data;
+
+ /* Lock the partition */
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail3;
+
+ /* Iterate over each (potential) segment to update it. */
+ do {
+ boolean_t write = all_segments || current_offset == 0;
+
+ rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size,
+ &segment_data, &current_offset, &remaining_original_length,
+ &remaining_modified_length, write);
+ if (rc != 0) {
+ if (current_offset == 0) {
+ /*
+ * If no data has been read then the first
+ * segment is invalid, which is an error.
+ */
+ goto fail4;
+ }
+ break;
+ }
+ } while (current_offset < partn_size);
+
+ total_length = segment_data - partn_data;
+
+ /*
+	 * We have run out of space. This should already have been caught by
+	 * ef10_nvram_buf_write_tlv returning ENOSPC, so check defensively.
+ */
+ if (total_length > partn_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Erase the whole partition in NVRAM */
+ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write new partition contents from the buffer to NVRAM */
+ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data,
+ total_length)) != 0)
+ goto fail7;
+
+ /* Unlock the partition */
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
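+
+/*
+ * Illustrative sketch (editorial addition, not part of the upstream file):
+ * the all_segments flag is the only difference between updating the active
+ * configuration and updating every segment. The wrapper below is
+ * hypothetical; tag and data are whatever the caller wants to store.
+ */
+#if 0	/* example only; not compiled */
+static efx_rc_t
+example_update_dynamic_cfg(
+	__in			efx_nic_t *enp,
+	__in			uint32_t tag,
+	__in_bcount(size)	caddr_t data,
+	__in			size_t size,
+	__in			boolean_t all_segments)
+{
+	/*
+	 * all_segments == B_FALSE updates only the first (active) segment,
+	 * as ef10_nvram_partn_write_tlv() does; B_TRUE updates every
+	 * segment, as ef10_nvram_partn_set_version() does.
+	 */
+	return (ef10_nvram_partn_write_segment_tlv(enp,
+	    NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, tag, data, size,
+	    all_segments));
+}
+#endif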
+
+/*
+ * Get the size of an NVRAM partition. This is the total size allocated in
+ * NVRAM, not the amount of data used by the segments in the partition.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, EF10_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset,
+ data, chunk, mode)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ /*
+ * Read requests which come in through the EFX API expect to
+ * read the current, active partition.
+ */
+ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+ uint32_t erase_size;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ &erase_size, NULL)) != 0)
+ goto fail1;
+
+ if (erase_size == 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0)
+ goto fail2;
+ } else {
+ if (size % erase_size != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ while (size > 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset,
+ erase_size)) != 0)
+ goto fail4;
+ offset += erase_size;
+ size -= erase_size;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
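+
+/*
+ * Illustrative sketch (editorial addition, not part of the upstream file):
+ * ef10_nvram_partn_erase() above rejects sizes that are not a multiple of
+ * the reported erase block size, so a caller with an arbitrary length
+ * would round up first. The helper name is hypothetical and, as in the
+ * TLV writer above, the erase is assumed to happen inside a lock/unlock
+ * (update) window.
+ */
+#if 0	/* example only; not compiled */
+static efx_rc_t
+example_erase_aligned(
+	__in		efx_nic_t *enp,
+	__in		uint32_t partn,
+	__in		size_t length)
+{
+	uint32_t erase_size;
+	size_t aligned;
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+	    &erase_size, NULL)) != 0)
+		return (rc);
+
+	/* Round the length up to a whole number of erase blocks */
+	if (erase_size != 0)
+		aligned = ((length + erase_size - 1) / erase_size) * erase_size;
+	else
+		aligned = length;
+
+	if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+		return (rc);
+
+	rc = ef10_nvram_partn_erase(enp, partn, 0, aligned);
+
+	(void) ef10_nvram_partn_unlock(enp, partn, NULL);
+
+	return (rc);
+}
+#endif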
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ uint32_t write_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ NULL, &write_size)) != 0)
+ goto fail1;
+
+ if (write_size != 0) {
+ /*
+ * Check that the size is a multiple of the write chunk size if
+ * the write chunk size is available.
+ */
+ if (size % write_size != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ } else {
+ write_size = EF10_NVRAM_CHUNK;
+ }
+
+ while (size > 0) {
+ chunk = MIN(size, write_size);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail3;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *resultp)
+{
+ boolean_t reboot = B_FALSE;
+ efx_rc_t rc;
+
+ if (resultp != NULL)
+ *resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, resultp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ struct tlv_partition_version partn_version;
+ size_t size;
+ efx_rc_t rc;
+
+ /* Add or modify partition version TLV item */
+ partn_version.version_w = __CPU_TO_LE_16(version[0]);
+ partn_version.version_x = __CPU_TO_LE_16(version[1]);
+ partn_version.version_y = __CPU_TO_LE_16(version[2]);
+ partn_version.version_z = __CPU_TO_LE_16(version[3]);
+
+ size = sizeof (partn_version) - (2 * sizeof (uint32_t));
+
+ /* Write the version number to all segments in the partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ TLV_TAG_PARTITION_VERSION(partn),
+ (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct ef10_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} ef10_parttbl_entry_t;
+
+/* Translate EFX NVRAM types to firmware partition types */
+static ef10_parttbl_entry_t hunt_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE}
+};
+
+static ef10_parttbl_entry_t medford_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 1, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 2, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 3, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 4, EFX_NVRAM_UEFIROM}
+};
+
+static __checkReturn efx_rc_t
+ef10_parttbl_get(
+ __in efx_nic_t *enp,
+ __out ef10_parttbl_entry_t **parttblp,
+ __out size_t *parttbl_rowsp)
+{
+ switch (enp->en_family) {
+ case EFX_FAMILY_HUNTINGTON:
+ *parttblp = hunt_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ *parttblp = medford_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (EINVAL);
+ }
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->nvtype == type &&
+ entry->port == emip->emi_port) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
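+
+/*
+ * Illustrative sketch (editorial addition, not part of the upstream file):
+ * resolving an EFX NVRAM type to a firmware partition id with the lookup
+ * above. The port comes from the MCDI interface, which is why per-port
+ * rows exist in the tables. The wrapper name is hypothetical.
+ */
+#if 0	/* example only; not compiled */
+static efx_rc_t
+example_dynamic_cfg_partn(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *partnp)
+{
+	/*
+	 * For ports 1-4 in both tables above this resolves to
+	 * NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG.
+	 */
+	return (ef10_nvram_type_to_partn(enp, EFX_NVRAM_DYNAMIC_CFG, partnp));
+}
+#endif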
+
+#if EFSYS_OPT_DIAG
+
+static __checkReturn efx_rc_t
+ef10_nvram_partn_to_type(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out efx_nvram_type_t *typep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT(typep != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->partn == partn &&
+ entry->port == emip->emi_port) {
+ *typep = entry->nvtype;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_nvram_type_t type;
+ unsigned int npartns = 0;
+ uint32_t *partns = NULL;
+ size_t size;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /* Read available partitions from NVRAM partition map */
+ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t);
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, partns);
+ if (partns == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size,
+ &npartns)) != 0) {
+ goto fail2;
+ }
+
+ for (i = 0; i < npartns; i++) {
+ /* Check if the partition is supported for this port */
+ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0)
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0)
+ goto fail3;
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ efx_rc_t rc;
+
+ /* FIXME: get highest partn version from all ports */
+ /* FIXME: return partn description if available */
+
+ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep,
+ version, NULL, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = EF10_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_unlock(enp, partn, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_phy.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_phy.c
new file mode 100644
index 00000000..81309f29
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_phy.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static void
+mcdi_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+mcdi_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 40000 && fd)
+ *link_modep = EFX_LINK_40000FDX;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_GENERATE)
+ *fcntlp = EFX_FCNTL_GENERATE;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+
+ void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+	 * same way as GET_LINK encodes the speed.
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ mcdi_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = ef10_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &elsp->els_adv_cap_mask);
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &elsp->els_lp_cap_mask);
+
+ mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &elsp->els_link_mode, &elsp->els_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN)];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ boolean_t supported;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0)
+ goto fail1;
+ if (supported == B_FALSE)
+ goto out;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+	/* Too many fields for the POPULATE macros, so insert this afterwards */
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ case EFX_LINK_40000FDX:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+out:
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+ __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ /* TBD: no stats support in firmware yet */
+ _NOTE(ARGUNUSED(enp, esmp))
+ memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ uint32_t result;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(type))
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ if (result == MC_CMD_POLL_BIST_FAILED &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN &&
+ count > EFX_BIST_MEM_ECC_FATAL) {
+ if (valuesp != NULL) {
+ valuesp[EFX_BIST_MEM_TEST] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST);
+ valuesp[EFX_BIST_MEM_ADDR] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR);
+ valuesp[EFX_BIST_MEM_BUS] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS);
+ valuesp[EFX_BIST_MEM_EXPECT] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT);
+ valuesp[EFX_BIST_MEM_ACTUAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL);
+ valuesp[EFX_BIST_MEM_ECC] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC);
+ valuesp[EFX_BIST_MEM_ECC_PARITY] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY);
+ valuesp[EFX_BIST_MEM_ECC_FATAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL);
+ }
+ value_mask |= (1 << EFX_BIST_MEM_TEST) |
+ (1 << EFX_BIST_MEM_ADDR) |
+ (1 << EFX_BIST_MEM_BUS) |
+ (1 << EFX_BIST_MEM_EXPECT) |
+ (1 << EFX_BIST_MEM_ACTUAL) |
+ (1 << EFX_BIST_MEM_ECC) |
+ (1 << EFX_BIST_MEM_ECC_PARITY) |
+ (1 << EFX_BIST_MEM_ECC_FATAL);
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_XFI_FARMI &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on EF10. */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_rx.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_rx.c
new file mode 100644
index 00000000..b65faedd
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_rx.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in efsys_mem_t *esmp,
+ __in boolean_t disable_scatter,
+ __in uint32_t ps_bufsize)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
+ MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
+ int npages = EFX_RXQ_NBUFS(size);
+ int i;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ efx_rc_t rc;
+ uint32_t dma_mode;
+
+ /* If this changes, then the payload size might need to change. */
+ EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
+ EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
+
+ if (ps_bufsize > 0)
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+ else
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
+ MCDI_IN_POPULATE_DWORD_8(req, INIT_RXQ_EXT_IN_FLAGS,
+ INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
+ INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
+ INIT_RXQ_EXT_IN_CRC_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
+ INIT_RXQ_EXT_IN_DMA_MODE,
+ dma_mode,
+ INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+efx_mcdi_rss_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_support_t scale_support,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
+ MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
+ uint32_t rss_context;
+ uint32_t context_type;
+ efx_rc_t rc;
+
+ if (num_queues > EFX_MAXRSS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (scale_support) {
+ case EFX_RX_SCALE_EXCLUSIVE:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
+ break;
+ case EFX_RX_SCALE_SHARED:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
+ /* NUM_QUEUES is only used to validate indirection table offsets */
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ *rss_contextp = rss_context;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_flags(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
+ (type & EFX_RX_HASH_IPV4) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
+ (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
+ (type & EFX_RX_HASH_IPV6) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
+ (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_key(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
+ key, n);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_table(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
+ uint8_t *req_table;
+ int i, rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ req_table =
+ MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
+
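+	/*
+	 * Descriptive note (added for clarity): the MCDI indirection table has
+	 * a fixed number of entries, so the caller-supplied table is
+	 * replicated cyclically (table[i % n]) to fill it; an empty caller
+	 * table maps every entry to queue 0.
+	 */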
+ for (i = 0;
+ i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
+ i++) {
+ req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+ __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+
+ if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
+ &enp->en_rss_context) == 0) {
+ /*
+ * Allocated an exclusive RSS context, which allows both the
+ * indirection table and key to be modified.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+ } else {
+ /*
+ * Failed to allocate an exclusive RSS context. Continue
+ * operation without support for RSS. The pseudo-header in
+ * received packets will not contain a Toeplitz hash value.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+ enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
+ }
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ _NOTE(ARGUNUSED(enp, buf_size))
+ return (0);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ);
+ EFSYS_ASSERT3U(insert, ==, B_TRUE);
+
+ if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_flags(enp,
+ enp->en_rss_context, type)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_key(enp,
+ enp->en_rss_context, key, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_table(enp,
+ enp->en_rss_context, table, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+/*
+ * EF10 RX pseudo-header
+ * ---------------------
+ *
+ * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
+ *
+ * +00: Toeplitz hash value.
+ * (32bit little-endian)
+ * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
+ * (16bit big-endian)
+ * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
+ * (16bit big-endian)
+ * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
+ * (16bit little-endian)
+ * +10: MAC timestamp. Zero if timestamping is not enabled.
+ * (32bit little-endian)
+ *
+ * See "The RX Pseudo-header" in SF-109306-TC.
+ */
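+
+/*
+ * Illustrative sketch (not part of the original libefx code): a client driver
+ * that also needs the VLAN tags or the MAC timestamp could extract them from
+ * the pseudo-header directly, following the layout above (VLAN tags are
+ * big-endian, the timestamp is little-endian). Variable names here are
+ * hypothetical:
+ *
+ *	uint16_t outer_vlan = (buffer[4] << 8) | buffer[5];
+ *	uint16_t inner_vlan = (buffer[6] << 8) | buffer[7];
+ *	uint32_t timestamp = buffer[10] | (buffer[11] << 8) |
+ *	    (buffer[12] << 16) | (buffer[13] << 24);
+ */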
+
+ __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ /*
+ * The RX pseudo-header contains the packet length, excluding the
+ * pseudo-header. If the hardware receive datapath was operating in
+ * cut-through mode then the length in the RX pseudo-header will be
+ * zero, and the packet length must be obtained from the DMA length
+ * reported in the RX event.
+ */
+ *lengthp = buffer[8] | (buffer[9] << 8);
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return (buffer[0] |
+ (buffer[1] << 8) |
+ (buffer[2] << 16) |
+ (buffer[3] << 24));
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ ESF_DZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+ void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
+ if (pushed == wptr)
+ return;
+
+ *pushedp = wptr;
+
+ /* Push the populated descriptors out */
+ wptr &= erp->er_mask;
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+ef10_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_dword_t dword;
+ efx_evq_rxq_state_t *rxq_state =
+ &erp->er_eep->ee_rxq_state[erp->er_label];
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ if (rxq_state->eers_rx_packed_stream_credits == 0)
+ return;
+
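+	/*
+	 * Post the accumulated packed stream buffer credits to the adapter
+	 * via the PS_CREDITS magic doorbell command, then reset the local
+	 * credit count.
+	 */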
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
+ ERF_DZ_RX_DESC_MAGIC_CMD,
+ ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
+ ERF_DZ_RX_DESC_MAGIC_DATA,
+ rxq_state->eers_rx_packed_stream_credits);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+
+ rxq_state->eers_rx_packed_stream_credits = 0;
+}
+
+ __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ uint16_t buf_len;
+ uint8_t *pkt_start;
+ efx_qword_t *qwordp;
+ efx_evq_rxq_state_t *rxq_state =
+ &erp->er_eep->ee_rxq_state[erp->er_label];
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ buffer += current_offset;
+ pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE;
+
+ qwordp = (efx_qword_t *)buffer;
+ *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP);
+ *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN);
+ buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN);
+
+ buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE,
+ EFX_RX_PACKED_STREAM_ALIGNMENT);
+ *next_offsetp =
+ current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT;
+
+ EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length);
+ EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
+
+ if ((*next_offsetp ^ current_offset) &
+ EFX_RX_PACKED_STREAM_MEM_PER_CREDIT) {
+ if (rxq_state->eers_rx_packed_stream_credits <
+ EFX_RX_PACKED_STREAM_MAX_CREDITS)
+ rxq_state->eers_rx_packed_stream_credits++;
+ }
+
+ return (pkt_start);
+}
+
+
+#endif	/* EFSYS_OPT_RX_PACKED_STREAM */
+
+ __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(erp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+ boolean_t disable_scatter;
+ unsigned int ps_buf_size;
+
+ _NOTE(ARGUNUSED(id, erp))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ case EFX_RXQ_TYPE_SCATTER:
+ ps_buf_size = 0;
+ break;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ case EFX_RXQ_TYPE_PACKED_STREAM_1M:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_512K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_256K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_128K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_64K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
+ break;
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+ default:
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ if (ps_buf_size != 0) {
+ /* Check if datapath firmware supports packed stream mode */
+ if (encp->enc_rx_packed_stream_supported == B_FALSE) {
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /* Check if packed stream allows configurable buffer sizes */
+ if ((type != EFX_RXQ_TYPE_PACKED_STREAM_1M) &&
+ (encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
+ rc = ENOTSUP;
+ goto fail5;
+ }
+ }
+#else /* EFSYS_OPT_RX_PACKED_STREAM */
+ EFSYS_ASSERT(ps_buf_size == 0);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+ /* Scatter can only be disabled if the firmware supports doing so */
+ if (type == EFX_RXQ_TYPE_SCATTER)
+ disable_scatter = B_FALSE;
+ else
+ disable_scatter = encp->enc_rx_disable_scatter_supported;
+
+ if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,
+ esmp, disable_scatter, ps_buf_size)) != 0)
+ goto fail6;
+
+ erp->er_eep = eep;
+ erp->er_label = label;
+
+ ef10_ev_rxlabel_init(eep, erp, label, ps_buf_size != 0);
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+#if EFSYS_OPT_RX_PACKED_STREAM
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_evq_t *eep = erp->er_eep;
+ unsigned int label = erp->er_label;
+
+ ef10_ev_rxlabel_fini(eep, label);
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+ void
+ef10_rx_fini(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
+ }
+ enp->en_rss_context = 0;
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+#else
+ _NOTE(ARGUNUSED(enp))
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h b/src/seastar/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h
new file mode 100644
index 00000000..7d099b81
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -0,0 +1,941 @@
+/**************************************************************************\
+*//*! \file
+** <L5_PRIVATE L5_SOURCE>
+** \author mjs
+** \brief TLV item layouts for EF10 static and dynamic config in NVRAM
+** \date 2012/11/20
+** \cop (c) Solarflare Communications Inc.
+** </L5_PRIVATE>
+*//*
+\**************************************************************************/
+
+/* These structures define the layouts for the TLV items stored in static and
+ * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
+ *
+ * They contain the same sort of information that was kept in the
+ * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures
+ * (defined in <ci/mgmt/mc_flash_layout.h> and <ci/mgmt/mc_dynamic_cfg.h>) for
+ * Siena.
+ *
+ * These are used directly by the MC and should also be usable directly on host
+ * systems which are little-endian and do not do strange things with structure
+ * padding. (Big-endian host systems will require some byte-swapping.)
+ *
+ * -----
+ *
+ * Please refer to SF-108797-SW for a general overview of the TLV partition
+ * format.
+ *
+ * -----
+ *
+ * The current tag IDs have a general structure: with the exception of the
+ * special values defined in the document, they are of the form 0xLTTTNNNN,
+ * where:
+ *
+ * - L is a location, indicating where this tag is expected to be found:
+ * 0: static configuration
+ * 1: dynamic configuration
+ * 2: firmware internal use
+ * 3: license partition
+ *
+ * - TTT is a type, which is just a unique value. The same type value
+ * might appear in both locations, indicating a relationship between
+ * the items (e.g. static and dynamic VPD below).
+ *
+ * - NNNN is an index of some form. Some item types are per-port, some
+ * are per-PF, some are per-partition-type.
+ *
+ * -----
+ *
+ * As with the previous Siena structures, each structure here is laid out
+ * carefully: values are aligned to their natural boundary, with explicit
+ * padding fields added where necessary. (No, technically this does not
+ * absolutely guarantee portability. But, in practice, compilers are generally
+ * sensible enough not to introduce completely pointless padding, and it works
+ * well enough.)
+ */
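+
+/* Worked example of the 0xLTTTNNNN scheme, using tags defined further down in
+ * this file:
+ *
+ *   TLV_TAG_PF_STATIC_VPD(2)  == 0x00030002: L = 0 (static config),
+ *                                            TTT = 0x003, NNNN = 2 (PF 2)
+ *   TLV_TAG_PF_DYNAMIC_VPD(2) == 0x10030002: L = 1 (dynamic config),
+ *                                            TTT = 0x003, NNNN = 2 (PF 2)
+ *
+ * The shared type value (0x003) is what marks the static and dynamic VPD
+ * items as related.
+ */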
+
+
+#ifndef CI_MGMT_TLV_LAYOUT_H
+#define CI_MGMT_TLV_LAYOUT_H
+
+
+/* ----------------------------------------------------------------------------
+ * General structure (defined by SF-108797-SW)
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* The "end" tag.
+ *
+ * (Note that this is *not* followed by length or value fields: anything after
+ * the tag itself is irrelevant.)
+ */
+
+#define TLV_TAG_END (0xEEEEEEEE)
+
+
+/* Other special reserved tag values.
+ */
+
+#define TLV_TAG_SKIP (0x00000000)
+#define TLV_TAG_INVALID (0xFFFFFFFF)
+
+
+/* TLV partition header.
+ *
+ * In a TLV partition, this must be the first item in the sequence, at offset
+ * 0.
+ */
+
+#define TLV_TAG_PARTITION_HEADER (0xEF10DA7A)
+
+struct tlv_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+/* 0 indicates the default segment (always located at offset 0), while other values
+ * are for RFID-selectable presets that should immediately follow the default segment.
+ * The default segment may also have preset > 0, which means that it is a preset
+ * selected through an RFID command and copied by FW to the location at offset 0. */
+ uint16_t preset;
+ uint32_t generation;
+ uint32_t total_length;
+};
+
+
+/* TLV partition trailer.
+ *
+ * In a TLV partition, this must be the last item in the sequence, immediately
+ * preceding the TLV_TAG_END word.
+ */
+
+#define TLV_TAG_PARTITION_TRAILER (0xEF101A57)
+
+struct tlv_partition_trailer {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t generation;
+ uint32_t checksum;
+};
+
+
+/* Appendable TLV partition header.
+ *
+ * In an appendable TLV partition, this must be the first item in the sequence,
+ * at offset 0. (Note that, unlike the configuration partitions, there is no
+ * trailer before the TLV_TAG_END word.)
+ */
+
+#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7)
+
+struct tlv_appendable_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+ uint16_t reserved;
+};
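+
+/* Putting the above together, a configuration TLV partition is laid out as
+ * follows (illustrative summary; SF-108797-SW remains the authoritative
+ * reference):
+ *
+ *   offset 0: struct tlv_partition_header
+ *             ... zero or more TLV items ...
+ *             struct tlv_partition_trailer
+ *             TLV_TAG_END word
+ *
+ * An appendable partition differs only in starting with
+ * struct tlv_appendable_partition_header and having no trailer before the
+ * TLV_TAG_END word.
+ */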
+
+
+/* ----------------------------------------------------------------------------
+ * Configuration items
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* NIC global capabilities.
+ */
+
+#define TLV_TAG_GLOBAL_CAPABILITIES (0x00010000)
+
+struct tlv_global_capabilities {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t flags;
+};
+
+
+/* Siena-style per-port MAC address allocation.
+ *
+ * There are <count> addresses, starting at <base_address> and incrementing
+ * by adding <stride> to the low-order byte(s).
+ *
+ * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool
+ * of contiguous MAC addresses for the firmware to allocate as it sees fit.)
+ */
+
+#define TLV_TAG_PORT_MAC(port) (0x00020000 + (port))
+
+struct tlv_port_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved;
+ uint16_t count;
+ uint16_t stride;
+};
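+
+/* Worked example with hypothetical values: base_address 00:0F:53:00:00:10,
+ * count 4 and stride 2 would describe the addresses 00:0F:53:00:00:10,
+ * ...:12, ...:14 and ...:16 for this port.
+ */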
+
+
+/* Static VPD.
+ *
+ * This is the portion of VPD which is set at manufacturing time and not
+ * expected to change. It is formatted as a standard PCI VPD block. There are
+ * global and per-pf TLVs for this, the global TLV is new for Medford and is
+ * used in preference to the per-pf TLV.
+ */
+
+#define TLV_TAG_PF_STATIC_VPD(pf) (0x00030000 + (pf))
+
+struct tlv_pf_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_STATIC_VPD (0x001f0000)
+
+struct tlv_global_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* Dynamic VPD.
+ *
+ * This is the portion of VPD which may be changed (e.g. by firmware updates).
+ * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs
+ * for this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DYNAMIC_VPD(pf) (0x10030000 + (pf))
+
+struct tlv_pf_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_DYNAMIC_VPD (0x10200000)
+
+struct tlv_global_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* "DBI" PCI config space changes.
+ *
+ * This is a set of edits made to the default PCI config space values before
+ * the device is allowed to enumerate. There are global and per-pf TLVs for
+ * this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DBI(pf) (0x00040000 + (pf))
+
+struct tlv_pf_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+#define TLV_TAG_GLOBAL_DBI (0x00210000)
+
+struct tlv_global_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+/* Partition subtype codes.
+ *
+ * A subtype may optionally be stored for each type of partition present in
+ * the NVRAM. For example, this may be used to allow a generic firmware update
+ * utility to select a specific variant of firmware for a specific variant of
+ * board.
+ *
+ * The description[] field is an optional string which is returned in the
+ * MC_CMD_NVRAM_METADATA response if present.
+ */
+
+#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type))
+
+struct tlv_partition_subtype {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t subtype;
+ uint8_t description[];
+};
+
+
+/* Partition version codes.
+ *
+ * A version may optionally be stored for each type of partition present in
+ * the NVRAM. This provides a standard way of tracking the currently stored
+ * version of each of the various component images.
+ */
+
+#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type))
+
+struct tlv_partition_version {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t version_w;
+ uint16_t version_x;
+ uint16_t version_y;
+ uint16_t version_z;
+};
+
+/* Global PCIe configuration */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000)
+
+struct tlv_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ int16_t max_pf_number; /**< Largest PF RID (lower PFs may be hidden) */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+#define TLV_MAX_PF_DEFAULT (-1) /* Use FW default for largest PF RID */
+#define TLV_APER_DEFAULT (0xFFFF) /* Use FW default for a given aperture */
+};
+
+/* Per-PF configuration. Note that not all these fields are necessarily useful
+ * as the apertures are constrained by the BIU settings (the one case we do
+ * use is to make BAR2 bigger than the BIU thinks to reserve space), but we can
+ * tidy things up later */
+
+#define TLV_TAG_PF_PCIE_CONFIG(pf) (0x10080000 + (pf))
+
+struct tlv_per_pf_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vfs_total;
+ uint8_t port_allocation;
+ uint16_t vectors_per_pf;
+ uint16_t vectors_per_vf;
+ uint8_t pf_bar0_aperture;
+ uint8_t pf_bar2_aperture;
+ uint8_t vf_bar0_aperture;
+ uint8_t vf_base;
+ uint16_t supp_pagesz;
+ uint16_t msix_vec_base;
+};
+
+
+/* Development ONLY. This is a single TLV tag for all the gubbins
+ * that can be set through the MC command-line other than the PCIe
+ * settings. This is a temporary measure. */
+#define TLV_TAG_TMP_GUBBINS (0x10090000) /* legacy symbol - do not use */
+#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS
+
+struct tlv_tmp_gubbins {
+ uint32_t tag;
+ uint32_t length;
+ /* Consumed by dpcpu.c */
+ uint64_t tx0_tags; /* Bitmap */
+ uint64_t tx1_tags; /* Bitmap */
+ uint64_t dl_tags; /* Bitmap */
+ uint32_t flags;
+#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */
+#define TLV_DPCPU_BIU_TAGS (2) /* Use BIU tag manager */
+#define TLV_DPCPU_TX0_TAGS (4) /* tx0_tags is valid */
+#define TLV_DPCPU_TX1_TAGS (8) /* tx1_tags is valid */
+#define TLV_DPCPU_DL_TAGS (16) /* dl_tags is valid */
+ /* Consumed by features.c */
+ uint32_t dut_features; /* All 1s -> leave alone */
+ int8_t with_rmon; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* Consumed by clocks_hunt.c */
+ int8_t clk_mode; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. */
+ int8_t rx_dc_size; /* -1 -> leave alone */
+ int8_t tx_dc_size;
+ int16_t num_q_allocs;
+};
+
+/* Global port configuration
+ *
+ * This is now deprecated in favour of a platform-provided default
+ * and dynamic config override via tlv_global_port_options.
+ */
+#define TLV_TAG_GLOBAL_PORT_CONFIG (0x000a0000)
+
+struct tlv_global_port_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t ports_per_core;
+ uint32_t max_port_speed;
+};
+
+
+/* Firmware options.
+ *
+ * This is intended for user-configurable selection of optional firmware
+ * features and variants.
+ *
+ * Initially, this consists only of the satellite CPU firmware variant
+ * selection, but this tag could be extended in the future (using the
+ * tag length to determine whether additional fields are present).
+ */
+
+#define TLV_TAG_FIRMWARE_OPTIONS (0x100b0000)
+
+struct tlv_firmware_options {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t firmware_variant;
+#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff)
+
+/* These are the values for overriding the driver's choice; the definitions
+ * are taken from MCDI so that they don't get out of step. Include
+ * <ci/mgmt/mc_driver_pcol.h> or the equivalent from your driver's tree if
+ * you need to use these constants.
+ */
+#define TLV_FIRMWARE_VARIANT_FULL_FEATURED MC_CMD_FW_FULL_FEATURED
+#define TLV_FIRMWARE_VARIANT_LOW_LATENCY MC_CMD_FW_LOW_LATENCY
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM MC_CMD_FW_PACKED_STREAM
+#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE MC_CMD_FW_HIGH_TX_RATE
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
+#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE
+};
+
+/* Voltage settings
+ *
+ * Intended for boards with A0 silicon where the core voltage may
+ * need tweaking. Most likely set once when the pass voltage is
+ * determined. */
+
+#define TLV_TAG_0V9_SETTINGS (0x000c0000)
+
+struct tlv_0v9_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t flags; /* Boards with high 0v9 settings may need active cooling */
+#define TLV_TAG_0V9_REQUIRES_FAN (1)
+ uint16_t target_voltage; /* In millivolts */
+ /* Since the limits are meant to be centred to the target (and must at least
+ * contain it) they need setting as well. */
+ uint16_t warn_low; /* In millivolts */
+ uint16_t warn_high; /* In millivolts */
+ uint16_t panic_low; /* In millivolts */
+ uint16_t panic_high; /* In millivolts */
+};
+
+
+/* Clock configuration */
+
+#define TLV_TAG_CLOCK_CONFIG (0x000d0000) /* legacy symbol - do not use */
+#define TLV_TAG_CLOCK_CONFIG_HUNT TLV_TAG_CLOCK_CONFIG
+
+struct tlv_clock_config {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_icore; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+#define TLV_TAG_CLOCK_CONFIG_MEDFORD (0x00100000)
+
+struct tlv_clock_config_medford {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_mc; /* MHz */
+ uint16_t clk_rmon; /* MHz */
+ uint16_t clk_vswitch; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+
+/* EF10-style global pool of MAC addresses.
+ *
+ * There are <count> addresses, starting at <base_address>, which are
+ * contiguous. Firmware is responsible for allocating addresses from this
+ * pool to ports / PFs as appropriate.
+ */
+
+#define TLV_TAG_GLOBAL_MAC (0x000e0000)
+
+struct tlv_global_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved1;
+ uint16_t count;
+ uint16_t reserved2;
+};
+
+#define TLV_TAG_ATB_0V9_TARGET (0x000f0000) /* legacy symbol - do not use */
+#define TLV_TAG_ATB_0V9_TARGET_HUNT TLV_TAG_ATB_0V9_TARGET
+
+/* The target value for the 0v9 power rail measured on-chip at the
+ * analogue test bus */
+struct tlv_0v9_atb_target {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t millivolts;
+ uint16_t reserved;
+};
+
+/* Factory settings for amplitude calibration of the PCIE TX serdes */
+#define TLV_TAG_TX_PCIE_AMP_CONFIG (0x00220000)
+struct tlv_pcie_tx_amp_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t quad_tx_imp2k[4];
+ uint8_t quad_tx_imp50[4];
+ uint8_t lane_amp[16];
+};
+
+
+/* Global PCIe configuration, second revision. This represents the visible PFs
+ * by a bitmap rather than having the number of the highest visible one. As such
+ * it can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG
+ * can and it should be used in place of that tag in future (but compatibility with
+ * the old tag will be left in the firmware indefinitely). */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000)
+
+struct tlv_pcie_config_r2 {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t visible_pfs; /**< Bitmap of visible PFs */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+};
+
+/* Dynamic port mode.
+ *
+ * Allows selecting alternate port configuration for platforms that support it
+ * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). This affects the
+ * number of externally visible ports (and, hence, PF to port mapping), so must
+ * be done at boot time.
+ *
+ * This tag supersedes tlv_global_port_config.
+ */
+
+#define TLV_TAG_GLOBAL_PORT_MODE (0x10110000)
+
+struct tlv_global_port_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t port_mode;
+#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
+#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */
+#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */
+#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
+#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) /* 4x10G, single QSFP, cage 0 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */
+#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) /* 2x10G + 2x10G, dual QSFP (Medford) */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2
+};
+
+/* Type of the v-switch created implicitly by the firmware */
+
+#define TLV_TAG_VSWITCH_TYPE(port) (0x10120000 + (port))
+
+struct tlv_vswitch_type {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vswitch_type;
+#define TLV_VSWITCH_TYPE_DEFAULT (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */
+#define TLV_VSWITCH_TYPE_NONE (0)
+#define TLV_VSWITCH_TYPE_VLAN (1)
+#define TLV_VSWITCH_TYPE_VEB (2)
+#define TLV_VSWITCH_TYPE_VEPA (3)
+#define TLV_VSWITCH_TYPE_MUX (4)
+#define TLV_VSWITCH_TYPE_TEST (5)
+};
+
+/* A VLAN tag for the v-port created implicitly by the firmware */
+
+#define TLV_TAG_VPORT_VLAN_TAG(pf) (0x10130000 + (pf))
+
+struct tlv_vport_vlan_tag {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vlan_tag;
+#define TLV_VPORT_NO_VLAN_TAG (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */
+};
+
+/* Offset to be applied to the 0v9 setting, wherever it came from */
+
+#define TLV_TAG_ATB_0V9_OFFSET (0x10140000)
+
+struct tlv_0v9_atb_offset {
+ uint32_t tag;
+ uint32_t length;
+ int16_t offset_millivolts;
+ uint16_t reserved;
+};
+
+/* A privilege mask given on reset to all non-admin PCIe functions (that is other than first-PF-per-port).
+ * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK, see also bug 44583.
+ * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to firmware default while
+ * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed) from firmware default:
+ * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */
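+
+/* Worked example with hypothetical masks: firmware_default_mask 0x0000000f,
+ * privilege_mask_add 0x00000030 and privilege_mask_rem 0x00000003 give
+ * (0x0000000f | 0x00000030) & ~0x00000003 = 0x0000003c.
+ */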
+
+#define TLV_TAG_PRIVILEGE_MASK (0x10150000) /* legacy symbol - do not use */
+
+struct tlv_privilege_mask { /* legacy structure - do not use */
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD (0x10150000)
+
+struct tlv_privilege_mask_add {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_REM (0x10160000)
+
+struct tlv_privilege_mask_rem {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_rem;
+};
+
+/* Additional privileges given to all PFs.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS (0x10190000)
+
+struct tlv_privilege_mask_add_all_pfs {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Additional privileges given to a selected PF.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf) (0x101A0000 + (pf))
+
+struct tlv_privilege_mask_add_single_pf {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Turning on/off the PFIOV mode.
+ * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */
+
+#define TLV_TAG_PFIOV(port) (0x10170000 + (port))
+
+struct tlv_pfiov {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t pfiov;
+#define TLV_PFIOV_OFF (0) /* Default */
+#define TLV_PFIOV_ON (1)
+};
+
+/* Multicast filter chaining mode selection.
+ *
+ * When enabled, multicast packets are delivered to all recipients of all
+ * matching multicast filters, with the exception that IP multicast filters
+ * will steal traffic from MAC multicast filters on a per-function basis.
+ * (New behaviour.)
+ *
+ * When disabled, multicast packets will always be delivered only to the
+ * recipients of the highest priority matching multicast filter.
+ * (Legacy behaviour.)
+ *
+ * The DEFAULT mode (which is the same as the tag not being present at all)
+ * is equivalent to ENABLED in production builds, and DISABLED in eftest
+ * builds.
+ *
+ * This option is intended to provide run-time control over this feature
+ * while it is being stabilised and may be withdrawn at some point in the
+ * future; the new behaviour is intended to become the standard behaviour.
+ */
+
+#define TLV_TAG_MCAST_FILTER_CHAINING (0x10180000)
+
+struct tlv_mcast_filter_chaining {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_MCAST_FILTER_CHAINING_DEFAULT (0xffffffff)
+#define TLV_MCAST_FILTER_CHAINING_DISABLED (0)
+#define TLV_MCAST_FILTER_CHAINING_ENABLED (1)
+};
+
+/* Pacer rate limit per PF */
+#define TLV_TAG_RATE_LIMIT(pf) (0x101b0000 + (pf))
+
+struct tlv_rate_limit {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t rate_mbps;
+};
+
+/* OCSD Enable/Disable
+ *
+ * This setting allows OCSD to be disabled. This is a requirement for HP
+ * servers to support PCI passthrough for virtualization.
+ *
+ * The DEFAULT mode (which is the same as the tag not being present) is
+ * equivalent to ENABLED.
+ *
+ * This option is not used by the MCFW, and is entirely handled by the various
+ * drivers that support OCSD, by reading the setting before they attempt
+ * to enable OCSD.
+ *
+ * bit0: OCSD Disabled/Enabled
+ */
+
+#define TLV_TAG_OCSD (0x101C0000)
+
+struct tlv_ocsd {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_OCSD_DISABLED 0
+#define TLV_OCSD_ENABLED 1 /* Default */
+};
+
+/* Descriptor cache config.
+ *
+ * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also
+ * sets the total number of VIs. When the number of VIs is reduced, VIs are
+ * taken away from the highest-numbered port first, so a vi_count of 1024 means
+ * 1024 VIs on the first port and 0 on the second (on a Torino).
+ */
+
+#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG (0x101d0000)
+
+struct tlv_descriptor_cache_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t rx_desc_cache_size;
+ uint8_t tx_desc_cache_size;
+ uint16_t vi_count;
+};
+#define TLV_DESC_CACHE_DEFAULT (0xff)
+#define TLV_VI_COUNT_DEFAULT (0xffff)
+
+/* RX event merging config (read batching).
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins.
+ */
+
+#define TLV_TAG_RX_EVENT_MERGING_CONFIG (0x101e0000)
+
+struct tlv_rx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+};
+#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000)
+struct tlv_pcie_link_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t gen; /* Target PCIe generation: 1, 2, 3 */
+ uint16_t width; /* Number of lanes */
+};
+
+/* TX event merging config.
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins, and the global timeout for
+ * empty queues.
+ */
+#define TLV_TAG_TX_EVENT_MERGING_CONFIG (0x10210000)
+struct tlv_tx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+ uint32_t qempty_timeout_ns; /* Medford only */
+};
+#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_LICENSE (0x30800000)
+
+typedef struct tlv_license {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t data[];
+} tlv_license_t;
+
+/* TSA NIC IP address configuration
+ *
+ * Sets the TSA NIC IP address statically via a configuration tool, or
+ * dynamically via DHCP or via snooping, based on the mode selection
+ * (0=Static, 1=DHCP, 2=Snoop).
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000)
+
+#define TLV_TSAN_IP_MODE_STATIC (0)
+#define TLV_TSAN_IP_MODE_DHCP (1)
+#define TLV_TSAN_IP_MODE_SNOOP (2)
+typedef struct tlv_tsan_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+ uint32_t ip;
+ uint32_t netmask;
+ uint32_t gateway;
+ uint32_t port;
+ uint32_t bind_retry; /* DEPRECATED */
+ uint32_t bind_bkout; /* DEPRECATED */
+} tlv_tsan_config_t;
+
+/* TSA Controller IP address configuration
+ *
+ * Sets the TSA Controller IP address statically via configuration tool
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000)
+
+#define TLV_MAX_TSACS (4)
+typedef struct tlv_tsac_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t num_tsacs;
+ uint32_t ip[TLV_MAX_TSACS];
+ uint32_t port[TLV_MAX_TSACS];
+} tlv_tsac_config_t;
+
+/* Binding ticket
+ *
+ * Sets the TSA NIC binding ticket used for binding process between the TSA NIC
+ * and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_BINDING_TICKET (0x10240000)
+
+typedef struct tlv_binding_ticket {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_binding_ticket_t;
+
+/* Solarflare private key (DEPRECATED)
+ *
+ * Sets the Solarflare private key used for signing during the binding process
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_PIK_SF (0x10250000) /* DEPRECATED */
+
+typedef struct tlv_pik_sf {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_pik_sf_t;
+
+/* CA root certificate
+ *
+ * Sets the CA root certificate used for TSA Controller verification during
+ * TLS connection setup between the TSA NIC and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000)
+
+typedef struct tlv_ca_root_cert {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_ca_root_cert_t;
+
+/* Tx vFIFO Low latency configuration
+ *
+ * To keep the desired booting behaviour for the switch, all that is required
+ * is to know whether the low latency mode is enabled.
+ */
+
+#define TLV_TAG_TX_VFIFO_ULL_MODE (0x10270000)
+struct tlv_tx_vfifo_ull_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_TX_VFIFO_ULL_MODE_DEFAULT 0
+};
+
+/* BIU mode
+ *
+ * Medford2 tag for selecting VI window decode (see values below)
+ */
+#define TLV_TAG_BIU_VI_WINDOW_MODE (0x10280000)
+struct tlv_biu_vi_window_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_BIU_VI_WINDOW_MODE_8K 0 /* 8k per VI, CTPIO not mapped, medford/hunt compatible */
+#define TLV_BIU_VI_WINDOW_MODE_16K 1 /* 16k per VI, CTPIO mapped */
+#define TLV_BIU_VI_WINDOW_MODE_64K 2 /* 64k per VI, CTPIO mapped, POWER-friendly */
+};
+
+/* FastPD mode
+ *
+ * Medford2 tag for configuring the FastPD mode (see values below)
+ */
+#define TLV_TAG_FASTPD_MODE(port) (0x10290000 + (port))
+struct tlv_fastpd_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_FASTPD_MODE_SOFT_ALL 0 /* All packets to the SoftPD */
+#define TLV_FASTPD_MODE_FAST_ALL 1 /* All packets to the FastPD */
+#define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */
+};
+
+#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_tx.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_tx.c
new file mode 100644
index 00000000..0f8e9b1b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_tx.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in uint16_t flags,
+ __in efsys_mem_t *esmp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+ MC_CMD_INIT_TXQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
+ EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
+
+ npages = EFX_TXQ_NBUFS(size);
+ if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
+
+ MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
+ INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
+ INIT_TXQ_IN_CRC_MODE, 0,
+ INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
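+	/* Populate the DMA address table, one entry per EFX_BUF_SIZE buffer */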
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_qword_t desc;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id))
+
+ if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
+ esmp)) != 0)
+ goto fail1;
+
+ /*
+ * A previous user of this TX queue may have written a descriptor to the
+ * TX push collector, but not pushed the doorbell (e.g. after a crash).
+ * The next doorbell write would then push the stale descriptor.
+ *
+ * Ensure the (per network port) TX push collector is cleared by writing
+ * a no-op TX option descriptor. See bug29981 for details.
+ */
+ *addedp = 1;
+ EFX_POPULATE_QWORD_4(desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
+ ef10_tx_qpush(etp, *addedp, 0);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_piobuf_handle_t handle;
+ efx_rc_t rc;
+
+ if (etp->et_pio_size != 0) {
+ rc = EALREADY;
+ goto fail1;
+ }
+
+ /* Sub-allocate a PIO block from a piobuf */
+ if ((rc = ef10_nic_pio_alloc(enp,
+ &etp->et_pio_bufnum,
+ &handle,
+ &etp->et_pio_blknum,
+ &etp->et_pio_offset,
+ &etp->et_pio_size)) != 0) {
+ goto fail2;
+ }
+ EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
+
+ /* Link the piobuf to this TXQ */
+ if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
+ goto fail3;
+ }
+
+ /*
+ * et_pio_offset is the offset of the sub-allocated block within the
+ * hardware PIO buffer. It is used as the buffer address in the PIO
+ * option descriptor.
+ *
+ * et_pio_write_offset is the offset of the sub-allocated block from the
+ * start of the write-combined memory mapping, and is used for writing
+ * data into the PIO buffer.
+ */
+ etp->et_pio_write_offset =
+ (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
+ ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+fail2:
+ EFSYS_PROBE(fail2);
+ etp->et_pio_size = 0;
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+
+ if (etp->et_pio_size != 0) {
+ /* Unlink the piobuf from this TXQ */
+ ef10_nic_pio_unlink(enp, etp->et_index);
+
+ /* Free the sub-allocated PIO block */
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ etp->et_pio_size = 0;
+ etp->et_pio_write_offset = 0;
+ }
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(length) uint8_t *buffer,
+ __in size_t length,
+ __in size_t offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efsys_bar_t *esbp = enp->en_esbp;
+ uint32_t write_offset;
+ uint32_t write_offset_limit;
+ efx_qword_t *eqp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ if (offset + length > etp->et_pio_size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ /*
+ * Writes to PIO buffers must be 64 bit aligned, and multiples of
+ * 64 bits.
+ */
+ write_offset = etp->et_pio_write_offset + offset;
+ write_offset_limit = write_offset + length;
+ eqp = (efx_qword_t *)buffer;
+ while (write_offset < write_offset_limit) {
+ EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
+ eqp++;
+ write_offset += sizeof (efx_qword_t);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_qword_t pio_desc;
+ unsigned int id;
+ size_t offset;
+ unsigned int added = *addedp;
+ efx_rc_t rc;
+
+
+ if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
+ unsigned int, id, uint32_t, etp->et_pio_offset,
+ size_t, pkt_length);
+
+ EFX_POPULATE_QWORD_5(pio_desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, 1,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
+ ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
+
+ *addedp = added;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t addr = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ boolean_t eop = ebp->eb_eop;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t qword;
+
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <=
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
+ unsigned int, id, efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(qword,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * This improves performance by pushing a TX descriptor at the same time as the
+ * doorbell. The descriptor must be added to the TXQ, so that it can be used if
+ * the hardware decides not to use the pushed descriptor.
+ */
+ void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ unsigned int wptr;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ wptr = added & etp->et_mask;
+ id = pushed & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, wptr,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
+ &oword);
+}
+
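A hedged sketch of the post-then-push sequence described in the comment above;
only ef10_tx_qpost() and ef10_tx_qpush() come from this file, while the caller
state (eb, n, completed, added) is hypothetical:

	/* Hypothetical caller fragment: write descriptors into the ring,
	 * then ring the doorbell with the index of the descriptor to push. */
	unsigned int old_added = added;

	if (ef10_tx_qpost(etp, eb, n, completed, &added) == 0)
		ef10_tx_qpush(etp, added, old_added);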
+ __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+}
+
+ void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint8_t, tcp_flags);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+}
+
+ void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint16_t, tcp_mss);
+
+ EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
+
+ EFX_POPULATE_QWORD_5(edp[0].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+ EFX_POPULATE_QWORD_4(edp[1].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
+}
+
+ void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
+ uint16_t, tci);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_VLAN,
+ ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
+ ESF_DZ_TX_VLAN_TAG1, tci);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp, ns))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/ef10_vpd.c b/src/seastar/dpdk/drivers/net/sfc/base/ef10_vpd.c
new file mode 100644
index 00000000..71123a90
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/ef10_vpd.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp)
+{
+ caddr_t svpd;
+ size_t svpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_STATIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_STATIC_VPD(pci_pf);
+ }
+
+ /*
+ * The VPD interface exposes VPD resources from the combined static and
+ * dynamic VPD storage. As the static VPD configuration should *never*
+ * change, we can cache it.
+ */
+ svpd = NULL;
+ svpd_size = 0;
+ rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_STATIC_CONFIG,
+ tag, &svpd, &svpd_size);
+ if (rc != 0) {
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot access VPD */
+ goto out;
+ }
+ goto fail1;
+ }
+
+ if (svpd != NULL && svpd_size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, svpd_size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_arch.ef10.ena_svpd = svpd;
+ enp->en_arch.ef10.ena_svpd_length = svpd_size;
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, svpd_size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd,
+ * which is the size of the DYNAMIC_CONFIG partition.
+ */
+ if ((rc = efx_mcdi_nvram_info(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ sizep, NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ caddr_t dvpd;
+ size_t dvpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ if ((rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, &dvpd, &dvpd_size)) != 0)
+ goto fail1;
+
+ if (dvpd_size > size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+ memcpy(data, dvpd, dvpd_size);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + dvpd_size, 0xff, size - dvpd_size);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. ef10_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+	/*
+	 * Only create an ID string in the dynamic cfg if the cached static
+	 * cfg doesn't already have one
+	 */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_arch.ef10.ena_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t vpd_length;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ /* Determine total length of new dynamic VPD */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Store new dynamic VPD in all segments in DYNAMIC_CONFIG partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, data, vpd_length, B_TRUE)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length,
+ enp->en_arch.ef10.ena_svpd);
+
+ enp->en_arch.ef10.ena_svpd = NULL;
+ enp->en_arch.ef10.ena_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+#endif /* EFSYS_OPT_VPD */
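Taken together, the functions above suggest the following consumer flow. This
is a hedged sketch rather than upstream code: the 'data' buffer allocation is
left out and the "SN" keyword is only an illustration.

	/* Hypothetical VPD lookup flow (error handling omitted). */
	size_t size;
	efx_vpd_value_t evv;

	(void) ef10_vpd_init(enp);		/* caches the static VPD */
	(void) ef10_vpd_size(enp, &size);	/* DYNAMIC_CONFIG partition size */
	(void) ef10_vpd_read(enp, data, size);	/* 'data' must hold 'size' bytes */
	(void) ef10_vpd_verify(enp, data, size);

	evv.evv_tag = EFX_VPD_RO;
	evv.evv_keyword = EFX_VPD_KEYWORD('S', 'N');	/* illustrative keyword */
	(void) ef10_vpd_get(enp, data, size, &evv);	/* fills evv_value/evv_length */

	ef10_vpd_fini(enp);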
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx.h b/src/seastar/dpdk/drivers/net/sfc/base/efx.h
new file mode 100644
index 00000000..7eabc370
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx.h
@@ -0,0 +1,2535 @@
+/*
+ * Copyright (c) 2006-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_H
+#define _SYS_EFX_H
+
+#include "efsys.h"
+#include "efx_check.h"
+#include "efx_phy_ids.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_STATIC_ASSERT(_cond) \
+ ((void)sizeof(char[(_cond) ? 1 : -1]))
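A hypothetical usage note: the negative-array-size trick above turns a false
condition into a compile-time error, for example:

	/* Compiles only if the condition holds; char[-1] is rejected otherwise. */
	EFX_STATIC_ASSERT(EFX_MAC_ADDR_LEN == 6);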
+
+#define EFX_ARRAY_SIZE(_array) \
+ (sizeof(_array) / sizeof((_array)[0]))
+
+#define EFX_FIELD_OFFSET(_type, _field) \
+ ((size_t) &(((_type *)0)->_field))
+
+/* Return codes */
+
+typedef __success(return == 0) int efx_rc_t;
+
+
+/* Chip families */
+
+typedef enum efx_family_e {
+ EFX_FAMILY_INVALID,
+ EFX_FAMILY_FALCON, /* Obsolete and not supported */
+ EFX_FAMILY_SIENA,
+ EFX_FAMILY_HUNTINGTON,
+ EFX_FAMILY_MEDFORD,
+ EFX_FAMILY_NTYPES
+} efx_family_t;
+
+extern __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp);
+
+
+#define EFX_PCI_VENID_SFC 0x1924
+
+#define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */
+
+#define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */
+#define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */
+#define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810
+
+#define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901
+#define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */
+#define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */
+
+#define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */
+#define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */
+
+#define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913
+#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */
+#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */
+
+#define EFX_MEM_BAR 2
+
+/* Error codes */
+
+enum {
+ EFX_ERR_INVALID,
+ EFX_ERR_SRAM_OOB,
+ EFX_ERR_BUFID_DC_OOB,
+ EFX_ERR_MEM_PERR,
+ EFX_ERR_RBUF_OWN,
+ EFX_ERR_TBUF_OWN,
+ EFX_ERR_RDESQ_OWN,
+ EFX_ERR_TDESQ_OWN,
+ EFX_ERR_EVQ_OWN,
+ EFX_ERR_EVFF_OFLO,
+ EFX_ERR_ILL_ADDR,
+ EFX_ERR_SRAM_PERR,
+ EFX_ERR_NCODES
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+extern __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length);
+
+
+/* Type prototypes */
+
+typedef struct efx_rxq_s efx_rxq_t;
+
+/* NIC */
+
+typedef struct efx_nic_s efx_nic_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp);
+
+extern __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+efx_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_unprobe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_destroy(
+ __in efx_nic_t *enp);
+
+#define EFX_PCIE_LINK_SPEED_GEN1 1
+#define EFX_PCIE_LINK_SPEED_GEN2 2
+#define EFX_PCIE_LINK_SPEED_GEN3 3
+
+typedef enum efx_pcie_link_performance_e {
+ EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY,
+ EFX_PCIE_LINK_PERFORMANCE_OPTIMAL
+} efx_pcie_link_performance_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp);
+
+#if EFSYS_OPT_MCDI
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/* Huntington and Medford require MCDIv2 commands */
+#define WITH_MCDI_V2 1
+#endif
+
+typedef struct efx_mcdi_req_s efx_mcdi_req_t;
+
+typedef enum efx_mcdi_exception_e {
+ EFX_MCDI_EXCEPTION_MC_REBOOT,
+ EFX_MCDI_EXCEPTION_MC_BADASSERT,
+} efx_mcdi_exception_t;
+
+#if EFSYS_OPT_MCDI_LOGGING
+typedef enum efx_log_msg_e {
+ EFX_LOG_INVALID,
+ EFX_LOG_MCDI_REQUEST,
+ EFX_LOG_MCDI_RESPONSE,
+} efx_log_msg_t;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+typedef struct efx_mcdi_transport_s {
+ void *emt_context;
+ efsys_mem_t *emt_dma_mem;
+ void (*emt_execute)(void *, efx_mcdi_req_t *);
+ void (*emt_ev_cpl)(void *);
+ void (*emt_exception)(void *, efx_mcdi_exception_t);
+#if EFSYS_OPT_MCDI_LOGGING
+ void (*emt_logger)(void *, efx_log_msg_t,
+ void *, size_t, void *, size_t);
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t);
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+} efx_mcdi_transport_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp);
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *usec_timeoutp);
+
+extern void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* INTR */
+
+#define EFX_NINTR_SIENA 1024
+
+typedef enum efx_intr_type_e {
+ EFX_INTR_INVALID = 0,
+ EFX_INTR_LINE,
+ EFX_INTR_MESSAGE,
+ EFX_INTR_NTYPES
+} efx_intr_type_t;
+
+#define EFX_INTR_SIZE (sizeof (efx_oword_t))
+
+extern __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+extern void
+efx_intr_enable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+#define EFX_INTR_NEVQS 32
+
+extern __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+extern void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *maskp);
+
+extern void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+extern void
+efx_intr_fatal(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_fini(
+ __in efx_nic_t *enp);
+
+/* MAC */
+
+#if EFSYS_OPT_MAC_STATS
+
+/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */
+typedef enum efx_mac_stat_e {
+ EFX_MAC_RX_OCTETS,
+ EFX_MAC_RX_PKTS,
+ EFX_MAC_RX_UNICST_PKTS,
+ EFX_MAC_RX_MULTICST_PKTS,
+ EFX_MAC_RX_BRDCST_PKTS,
+ EFX_MAC_RX_PAUSE_PKTS,
+ EFX_MAC_RX_LE_64_PKTS,
+ EFX_MAC_RX_65_TO_127_PKTS,
+ EFX_MAC_RX_128_TO_255_PKTS,
+ EFX_MAC_RX_256_TO_511_PKTS,
+ EFX_MAC_RX_512_TO_1023_PKTS,
+ EFX_MAC_RX_1024_TO_15XX_PKTS,
+ EFX_MAC_RX_GE_15XX_PKTS,
+ EFX_MAC_RX_ERRORS,
+ EFX_MAC_RX_FCS_ERRORS,
+ EFX_MAC_RX_DROP_EVENTS,
+ EFX_MAC_RX_FALSE_CARRIER_ERRORS,
+ EFX_MAC_RX_SYMBOL_ERRORS,
+ EFX_MAC_RX_ALIGN_ERRORS,
+ EFX_MAC_RX_INTERNAL_ERRORS,
+ EFX_MAC_RX_JABBER_PKTS,
+ EFX_MAC_RX_LANE0_CHAR_ERR,
+ EFX_MAC_RX_LANE1_CHAR_ERR,
+ EFX_MAC_RX_LANE2_CHAR_ERR,
+ EFX_MAC_RX_LANE3_CHAR_ERR,
+ EFX_MAC_RX_LANE0_DISP_ERR,
+ EFX_MAC_RX_LANE1_DISP_ERR,
+ EFX_MAC_RX_LANE2_DISP_ERR,
+ EFX_MAC_RX_LANE3_DISP_ERR,
+ EFX_MAC_RX_MATCH_FAULT,
+ EFX_MAC_RX_NODESC_DROP_CNT,
+ EFX_MAC_TX_OCTETS,
+ EFX_MAC_TX_PKTS,
+ EFX_MAC_TX_UNICST_PKTS,
+ EFX_MAC_TX_MULTICST_PKTS,
+ EFX_MAC_TX_BRDCST_PKTS,
+ EFX_MAC_TX_PAUSE_PKTS,
+ EFX_MAC_TX_LE_64_PKTS,
+ EFX_MAC_TX_65_TO_127_PKTS,
+ EFX_MAC_TX_128_TO_255_PKTS,
+ EFX_MAC_TX_256_TO_511_PKTS,
+ EFX_MAC_TX_512_TO_1023_PKTS,
+ EFX_MAC_TX_1024_TO_15XX_PKTS,
+ EFX_MAC_TX_GE_15XX_PKTS,
+ EFX_MAC_TX_ERRORS,
+ EFX_MAC_TX_SGL_COL_PKTS,
+ EFX_MAC_TX_MULT_COL_PKTS,
+ EFX_MAC_TX_EX_COL_PKTS,
+ EFX_MAC_TX_LATE_COL_PKTS,
+ EFX_MAC_TX_DEF_PKTS,
+ EFX_MAC_TX_EX_DEF_PKTS,
+ EFX_MAC_PM_TRUNC_BB_OVERFLOW,
+ EFX_MAC_PM_DISCARD_BB_OVERFLOW,
+ EFX_MAC_PM_TRUNC_VFIFO_FULL,
+ EFX_MAC_PM_DISCARD_VFIFO_FULL,
+ EFX_MAC_PM_TRUNC_QBB,
+ EFX_MAC_PM_DISCARD_QBB,
+ EFX_MAC_PM_DISCARD_MAPPING,
+ EFX_MAC_RXDP_Q_DISABLED_PKTS,
+ EFX_MAC_RXDP_DI_DROPPED_PKTS,
+ EFX_MAC_RXDP_STREAMING_PKTS,
+ EFX_MAC_RXDP_HLB_FETCH,
+ EFX_MAC_RXDP_HLB_WAIT,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_RX_BAD_BYTES,
+ EFX_MAC_VADAPTER_RX_OVERFLOW,
+ EFX_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_TX_BAD_BYTES,
+ EFX_MAC_VADAPTER_TX_OVERFLOW,
+ EFX_MAC_NSTATS
+} efx_mac_stat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderMacBlock */
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef enum efx_link_mode_e {
+ EFX_LINK_UNKNOWN = 0,
+ EFX_LINK_DOWN,
+ EFX_LINK_10HDX,
+ EFX_LINK_10FDX,
+ EFX_LINK_100HDX,
+ EFX_LINK_100FDX,
+ EFX_LINK_1000HDX,
+ EFX_LINK_1000FDX,
+ EFX_LINK_10000FDX,
+ EFX_LINK_40000FDX,
+ EFX_LINK_NMODES
+} efx_link_mode_t;
+
+#define EFX_MAC_ADDR_LEN 6
+
+#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01)
+
+#define EFX_MAC_MULTICAST_LIST_MAX 256
+
+#define EFX_MAC_SDU_MAX 9202
+
+#define EFX_MAC_PDU_ADJUSTMENT \
+ (/* EtherII */ 14 \
+ + /* VLAN */ 4 \
+ + /* CRC */ 4 \
+ + /* bug16011 */ 16) \
+
+#define EFX_MAC_PDU(_sdu) \
+ P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8)
+
+/*
+ * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give
+ * the SDU rounded up slightly.
+ */
+#define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT)
+
+#define EFX_MAC_PDU_MIN 60
+#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX)
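A worked example of the rounding noted above (illustrative arithmetic, not part
of the upstream file):

	/*
	 * EFX_MAC_PDU_ADJUSTMENT     = 14 + 4 + 4 + 16 = 38
	 * EFX_MAC_PDU(1500)          = P2ROUNDUP(1500 + 38, 8) = 1544
	 * EFX_MAC_SDU_FROM_PDU(1544) = 1544 - 38 = 1506 (rounded up from 1500)
	 */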
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst);
+
+extern __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled);
+
+extern __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+#define EFX_FCNTL_RESPOND 0x00000001
+#define EFX_FCNTL_GENERATE 0x00000002
+
+extern __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg);
+
+extern void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp);
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t))
+
+#define EFX_MAC_STATS_MASK_NPAGES \
+ (P2ROUNDUP(EFX_MAC_NSTATS, EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \
+ EFX_MAC_STATS_MASK_BITS_PER_PAGE)
+
+/*
+ * Get the mask of MAC statistics supported by the hardware.
+ *
+ * If mask_size is insufficient to hold the mask, EINVAL is returned.
+ * EFX_MAC_STATS_MASK_NPAGES multiplied by the page size (which is
+ * sizeof (uint32_t)) is sufficient.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+#define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \
+ ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \
+ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
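A hedged sketch of the mask usage described above; checking EFX_MAC_RX_OCTETS
is only an example:

	/* Hypothetical caller fragment: query the supported-statistics mask. */
	uint32_t mask[EFX_MAC_STATS_MASK_NPAGES];

	if (efx_mac_stats_get_mask(enp, mask, sizeof (mask)) == 0 &&
	    EFX_MAC_STAT_SUPPORTED(mask, EFX_MAC_RX_OCTETS)) {
		/* EFX_MAC_RX_OCTETS is filled in by efx_mac_stats_update() */
	}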
+
+#define EFX_MAC_STATS_SIZE 0x400
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+/*
+ * Upload mac statistics supported by the hardware into the given buffer.
+ *
+ * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes,
+ * and page aligned.
+ *
+ * The hardware will only DMA statistics that it understands (of course).
+ * Drivers should not make any assumptions about which statistics are
+ * supported, especially when the statistics are generated by firmware.
+ *
+ * Thus, drivers should zero this buffer before use, so that not-understood
+ * statistics read back as zero.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
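A hedged end-to-end sketch combining the upload guidance above with
efx_mac_stats_update(); 'stats_buf' stands for the (assumed) virtual address of
the DMA buffer described by 'esmp', and the wait step is platform specific:

	/* Hypothetical caller fragment (error handling omitted). */
	efsys_stat_t stat[EFX_MAC_NSTATS];
	uint32_t generation;

	(void) memset(stats_buf, 0, EFX_MAC_STATS_SIZE); /* unknown stats read as 0 */
	(void) efx_mac_stats_upload(enp, esmp);
	/* ... wait for the statistics DMA to complete ... */
	(void) efx_mac_stats_update(enp, esmp, stat, &generation);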
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+/* MON */
+
+typedef enum efx_mon_type_e {
+ EFX_MON_INVALID = 0,
+ EFX_MON_SFC90X0,
+ EFX_MON_SFC91X0,
+ EFX_MON_SFC92X0,
+ EFX_MON_NTYPES
+} efx_mon_type_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_name(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MON_STATS
+
+#define EFX_MON_STATS_PAGE_SIZE 0x100
+#define EFX_MON_MASK_ELEMENT_SIZE 32
+
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */
+typedef enum efx_mon_stat_e {
+ EFX_MON_STAT_2_5V,
+ EFX_MON_STAT_VCCP1,
+ EFX_MON_STAT_VCC,
+ EFX_MON_STAT_5V,
+ EFX_MON_STAT_12V,
+ EFX_MON_STAT_VCCP2,
+ EFX_MON_STAT_EXT_TEMP,
+ EFX_MON_STAT_INT_TEMP,
+ EFX_MON_STAT_AIN1,
+ EFX_MON_STAT_AIN2,
+ EFX_MON_STAT_INT_COOLING,
+ EFX_MON_STAT_EXT_COOLING,
+ EFX_MON_STAT_1V,
+ EFX_MON_STAT_1_2V,
+ EFX_MON_STAT_1_8V,
+ EFX_MON_STAT_3_3V,
+ EFX_MON_STAT_1_2VA,
+ EFX_MON_STAT_VREF,
+ EFX_MON_STAT_VAOE,
+ EFX_MON_STAT_AOE_TEMP,
+ EFX_MON_STAT_PSU_AOE_TEMP,
+ EFX_MON_STAT_PSU_TEMP,
+ EFX_MON_STAT_FAN0,
+ EFX_MON_STAT_FAN1,
+ EFX_MON_STAT_FAN2,
+ EFX_MON_STAT_FAN3,
+ EFX_MON_STAT_FAN4,
+ EFX_MON_STAT_VAOE_IN,
+ EFX_MON_STAT_IAOE,
+ EFX_MON_STAT_IAOE_IN,
+ EFX_MON_STAT_NIC_POWER,
+ EFX_MON_STAT_0_9V,
+ EFX_MON_STAT_I0_9V,
+ EFX_MON_STAT_I1_2V,
+ EFX_MON_STAT_0_9V_ADC,
+ EFX_MON_STAT_INT_TEMP2,
+ EFX_MON_STAT_VREG_TEMP,
+ EFX_MON_STAT_VREG_0_9V_TEMP,
+ EFX_MON_STAT_VREG_1_2V_TEMP,
+ EFX_MON_STAT_INT_VPTAT,
+ EFX_MON_STAT_INT_ADC_TEMP,
+ EFX_MON_STAT_EXT_VPTAT,
+ EFX_MON_STAT_EXT_ADC_TEMP,
+ EFX_MON_STAT_AMBIENT_TEMP,
+ EFX_MON_STAT_AIRFLOW,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC,
+ EFX_MON_STAT_HOTPOINT_TEMP,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT0,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT1,
+ EFX_MON_STAT_MUM_VCC,
+ EFX_MON_STAT_0V9_A,
+ EFX_MON_STAT_I0V9_A,
+ EFX_MON_STAT_0V9_A_TEMP,
+ EFX_MON_STAT_0V9_B,
+ EFX_MON_STAT_I0V9_B,
+ EFX_MON_STAT_0V9_B_TEMP,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_SODIMM_VOUT,
+ EFX_MON_STAT_SODIMM_0_TEMP,
+ EFX_MON_STAT_SODIMM_1_TEMP,
+ EFX_MON_STAT_PHY0_VCC,
+ EFX_MON_STAT_PHY1_VCC,
+ EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
+ EFX_MON_STAT_BOARD_FRONT_TEMP,
+ EFX_MON_STAT_BOARD_BACK_TEMP,
+ EFX_MON_NSTATS
+} efx_mon_stat_t;
+
+/* END MKCONFIG GENERATED MonitorHeaderStatsBlock */
+
+typedef enum efx_mon_stat_state_e {
+ EFX_MON_STAT_STATE_OK = 0,
+ EFX_MON_STAT_STATE_WARNING = 1,
+ EFX_MON_STAT_STATE_FATAL = 2,
+ EFX_MON_STAT_STATE_BROKEN = 3,
+ EFX_MON_STAT_STATE_NO_READING = 4,
+} efx_mon_stat_state_t;
+
+typedef struct efx_mon_stat_value_s {
+ uint16_t emsv_value;
+ uint16_t emsv_state;
+} efx_mon_stat_value_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+extern void
+efx_mon_fini(
+ __in efx_nic_t *enp);
+
+/* PHY */
+
+extern __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+typedef enum efx_phy_led_mode_e {
+ EFX_PHY_LED_DEFAULT = 0,
+ EFX_PHY_LED_OFF,
+ EFX_PHY_LED_ON,
+ EFX_PHY_LED_FLASH,
+ EFX_PHY_LED_NMODES
+} efx_phy_led_mode_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode);
+
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+extern __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+typedef enum efx_loopback_type_e {
+ EFX_LOOPBACK_OFF = 0,
+ EFX_LOOPBACK_DATA = 1,
+ EFX_LOOPBACK_GMAC = 2,
+ EFX_LOOPBACK_XGMII = 3,
+ EFX_LOOPBACK_XGXS = 4,
+ EFX_LOOPBACK_XAUI = 5,
+ EFX_LOOPBACK_GMII = 6,
+ EFX_LOOPBACK_SGMII = 7,
+ EFX_LOOPBACK_XGBR = 8,
+ EFX_LOOPBACK_XFI = 9,
+ EFX_LOOPBACK_XAUI_FAR = 10,
+ EFX_LOOPBACK_GMII_FAR = 11,
+ EFX_LOOPBACK_SGMII_FAR = 12,
+ EFX_LOOPBACK_XFI_FAR = 13,
+ EFX_LOOPBACK_GPHY = 14,
+ EFX_LOOPBACK_PHY_XS = 15,
+ EFX_LOOPBACK_PCS = 16,
+ EFX_LOOPBACK_PMA_PMD = 17,
+ EFX_LOOPBACK_XPORT = 18,
+ EFX_LOOPBACK_XGMII_WS = 19,
+ EFX_LOOPBACK_XAUI_WS = 20,
+ EFX_LOOPBACK_XAUI_WS_FAR = 21,
+ EFX_LOOPBACK_XAUI_WS_NEAR = 22,
+ EFX_LOOPBACK_GMII_WS = 23,
+ EFX_LOOPBACK_XFI_WS = 24,
+ EFX_LOOPBACK_XFI_WS_FAR = 25,
+ EFX_LOOPBACK_PHYXS_WS = 26,
+ EFX_LOOPBACK_PMA_INT = 27,
+ EFX_LOOPBACK_SD_NEAR = 28,
+ EFX_LOOPBACK_SD_FAR = 29,
+ EFX_LOOPBACK_PMA_INT_WS = 30,
+ EFX_LOOPBACK_SD_FEP2_WS = 31,
+ EFX_LOOPBACK_SD_FEP1_5_WS = 32,
+ EFX_LOOPBACK_SD_FEP_WS = 33,
+ EFX_LOOPBACK_SD_FES_WS = 34,
+ EFX_LOOPBACK_NTYPES
+} efx_loopback_type_t;
+
+typedef enum efx_loopback_kind_e {
+ EFX_LOOPBACK_KIND_OFF = 0,
+ EFX_LOOPBACK_KIND_ALL,
+ EFX_LOOPBACK_KIND_MAC,
+ EFX_LOOPBACK_KIND_PHY,
+ EFX_LOOPBACK_NKINDS
+} efx_loopback_kind_t;
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t type);
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep);
+
+extern void
+efx_port_fini(
+ __in efx_nic_t *enp);
+
+typedef enum efx_phy_cap_type_e {
+ EFX_PHY_CAP_INVALID = 0,
+ EFX_PHY_CAP_10HDX,
+ EFX_PHY_CAP_10FDX,
+ EFX_PHY_CAP_100HDX,
+ EFX_PHY_CAP_100FDX,
+ EFX_PHY_CAP_1000HDX,
+ EFX_PHY_CAP_1000FDX,
+ EFX_PHY_CAP_10000FDX,
+ EFX_PHY_CAP_PAUSE,
+ EFX_PHY_CAP_ASYM,
+ EFX_PHY_CAP_AN,
+ EFX_PHY_CAP_40000FDX,
+ EFX_PHY_CAP_NTYPES
+} efx_phy_cap_type_t;
+
+
+#define EFX_PHY_CAP_CURRENT 0x00000000
+#define EFX_PHY_CAP_DEFAULT 0x00000001
+#define EFX_PHY_CAP_PERM 0x00000002
+
+extern void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask);
+
+extern void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+typedef enum efx_phy_media_type_e {
+ EFX_PHY_MEDIA_INVALID = 0,
+ EFX_PHY_MEDIA_XAUI,
+ EFX_PHY_MEDIA_CX4,
+ EFX_PHY_MEDIA_KX4,
+ EFX_PHY_MEDIA_XFP,
+ EFX_PHY_MEDIA_SFP_PLUS,
+ EFX_PHY_MEDIA_BASE_T,
+ EFX_PHY_MEDIA_QSFP_PLUS,
+ EFX_PHY_MEDIA_NTYPES
+} efx_phy_media_type_t;
+
+/* Get the type of medium currently used. If the board has ports for
+ * modules, a module is present, and we recognise the media type of
+ * the module, then this will be the media type of the module.
+ * Otherwise it will be the media type of the port.
+ */
+extern void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep);
+
+extern efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#if EFSYS_OPT_PHY_STATS
+
+/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */
+typedef enum efx_phy_stat_e {
+ EFX_PHY_STAT_OUI,
+ EFX_PHY_STAT_PMA_PMD_LINK_UP,
+ EFX_PHY_STAT_PMA_PMD_RX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_TX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_REV_A,
+ EFX_PHY_STAT_PMA_PMD_REV_B,
+ EFX_PHY_STAT_PMA_PMD_REV_C,
+ EFX_PHY_STAT_PMA_PMD_REV_D,
+ EFX_PHY_STAT_PCS_LINK_UP,
+ EFX_PHY_STAT_PCS_RX_FAULT,
+ EFX_PHY_STAT_PCS_TX_FAULT,
+ EFX_PHY_STAT_PCS_BER,
+ EFX_PHY_STAT_PCS_BLOCK_ERRORS,
+ EFX_PHY_STAT_PHY_XS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_SYNC_A,
+ EFX_PHY_STAT_PHY_XS_SYNC_B,
+ EFX_PHY_STAT_PHY_XS_SYNC_C,
+ EFX_PHY_STAT_PHY_XS_SYNC_D,
+ EFX_PHY_STAT_AN_LINK_UP,
+ EFX_PHY_STAT_AN_MASTER,
+ EFX_PHY_STAT_AN_LOCAL_RX_OK,
+ EFX_PHY_STAT_AN_REMOTE_RX_OK,
+ EFX_PHY_STAT_CL22EXT_LINK_UP,
+ EFX_PHY_STAT_SNR_A,
+ EFX_PHY_STAT_SNR_B,
+ EFX_PHY_STAT_SNR_C,
+ EFX_PHY_STAT_SNR_D,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_A,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_B,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_C,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_D,
+ EFX_PHY_STAT_AN_COMPLETE,
+ EFX_PHY_STAT_PMA_PMD_REV_MAJOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MINOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MICRO,
+ EFX_PHY_STAT_PCS_FW_VERSION_0,
+ EFX_PHY_STAT_PCS_FW_VERSION_1,
+ EFX_PHY_STAT_PCS_FW_VERSION_2,
+ EFX_PHY_STAT_PCS_FW_VERSION_3,
+ EFX_PHY_STAT_PCS_FW_BUILD_YY,
+ EFX_PHY_STAT_PCS_FW_BUILD_MM,
+ EFX_PHY_STAT_PCS_FW_BUILD_DD,
+ EFX_PHY_STAT_PCS_OP_MODE,
+ EFX_PHY_NSTATS
+} efx_phy_stat_t;
+
+/* END MKCONFIG GENERATED PhyHeaderStatsBlock */
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t stat);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_PHY_STATS_SIZE 0x100
+
+extern __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+typedef enum efx_bist_type_e {
+ EFX_BIST_TYPE_UNKNOWN,
+ EFX_BIST_TYPE_PHY_NORMAL,
+ EFX_BIST_TYPE_PHY_CABLE_SHORT,
+ EFX_BIST_TYPE_PHY_CABLE_LONG,
+ EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */
+ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus*/
+ EFX_BIST_TYPE_REG, /* Test the register memories */
+ EFX_BIST_TYPE_NTYPES,
+} efx_bist_type_t;
+
+typedef enum efx_bist_result_e {
+ EFX_BIST_RESULT_UNKNOWN,
+ EFX_BIST_RESULT_RUNNING,
+ EFX_BIST_RESULT_PASSED,
+ EFX_BIST_RESULT_FAILED,
+} efx_bist_result_t;
+
+typedef enum efx_phy_cable_status_e {
+ EFX_PHY_CABLE_STATUS_OK,
+ EFX_PHY_CABLE_STATUS_INVALID,
+ EFX_PHY_CABLE_STATUS_OPEN,
+ EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_INTERPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_BUSY,
+} efx_phy_cable_status_t;
+
+typedef enum efx_bist_value_e {
+ EFX_BIST_PHY_CABLE_LENGTH_A,
+ EFX_BIST_PHY_CABLE_LENGTH_B,
+ EFX_BIST_PHY_CABLE_LENGTH_C,
+ EFX_BIST_PHY_CABLE_LENGTH_D,
+ EFX_BIST_PHY_CABLE_STATUS_A,
+ EFX_BIST_PHY_CABLE_STATUS_B,
+ EFX_BIST_PHY_CABLE_STATUS_C,
+ EFX_BIST_PHY_CABLE_STATUS_D,
+ EFX_BIST_FAULT_CODE,
+	/* Memory BIST specific values. These match the MC_CMD_BIST_POLL
+	 * response. */
+ EFX_BIST_MEM_TEST,
+ EFX_BIST_MEM_ADDR,
+ EFX_BIST_MEM_BUS,
+ EFX_BIST_MEM_EXPECT,
+ EFX_BIST_MEM_ACTUAL,
+ EFX_BIST_MEM_ECC,
+ EFX_BIST_MEM_ECC_PARITY,
+ EFX_BIST_MEM_ECC_FATAL,
+ EFX_BIST_NVALUES,
+} efx_bist_value_t;
+
+extern __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+#define EFX_FEATURE_IPV6 0x00000001
+#define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002
+#define EFX_FEATURE_LINK_EVENTS 0x00000004
+#define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008
+#define EFX_FEATURE_MCDI 0x00000020
+#define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040
+#define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080
+#define EFX_FEATURE_TURBO 0x00000100
+#define EFX_FEATURE_MCDI_DMA 0x00000200
+#define EFX_FEATURE_TX_SRC_FILTERS 0x00000400
+#define EFX_FEATURE_PIO_BUFFERS 0x00000800
+#define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000
+#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000
+#define EFX_FEATURE_PACKED_STREAM 0x00004000
+
+typedef struct efx_nic_cfg_s {
+ uint32_t enc_board_type;
+ uint32_t enc_phy_type;
+#if EFSYS_OPT_NAMES
+ char enc_phy_name[21];
+#endif
+ char enc_phy_revision[21];
+ efx_mon_type_t enc_mon_type;
+#if EFSYS_OPT_MON_STATS
+ uint32_t enc_mon_stat_dma_buf_size;
+ uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32];
+#endif
+ unsigned int enc_features;
+ uint8_t enc_mac_addr[6];
+ uint8_t enc_port; /* PHY port number */
+ uint32_t enc_intr_vec_base;
+ uint32_t enc_intr_limit;
+ uint32_t enc_evq_limit;
+ uint32_t enc_txq_limit;
+ uint32_t enc_rxq_limit;
+ uint32_t enc_txq_max_ndescs;
+ uint32_t enc_buftbl_limit;
+ uint32_t enc_piobuf_limit;
+ uint32_t enc_piobuf_size;
+ uint32_t enc_piobuf_min_alloc_size;
+ uint32_t enc_evq_timer_quantum_ns;
+ uint32_t enc_evq_timer_max_us;
+ uint32_t enc_clk_mult;
+ uint32_t enc_rx_prefix_size;
+ uint32_t enc_rx_buf_align_start;
+ uint32_t enc_rx_buf_align_end;
+#if EFSYS_OPT_LOOPBACK
+ efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t enc_phy_flags_mask;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ uint32_t enc_led_mask;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+#if EFSYS_OPT_PHY_STATS
+ uint64_t enc_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MCDI
+ uint8_t enc_mcdi_mdio_channel;
+#if EFSYS_OPT_PHY_STATS
+ uint32_t enc_mcdi_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MON_STATS
+ uint32_t *enc_mcdi_sensor_maskp;
+ uint32_t enc_mcdi_sensor_mask_size;
+#endif /* EFSYS_OPT_MON_STATS */
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_BIST
+ uint32_t enc_bist_mask;
+#endif /* EFSYS_OPT_BIST */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ uint32_t enc_pf;
+ uint32_t enc_vf;
+ uint32_t enc_privilege_mask;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+ boolean_t enc_bug26807_workaround;
+ boolean_t enc_bug35388_workaround;
+ boolean_t enc_bug41750_workaround;
+ boolean_t enc_bug61265_workaround;
+ boolean_t enc_rx_batching_enabled;
+ /* Maximum number of descriptors completed in an rx event. */
+ uint32_t enc_rx_batch_max;
+ /* Number of rx descriptors the hardware requires for a push. */
+ uint32_t enc_rx_push_align;
+ /* Maximum amount of data in DMA descriptor */
+ uint32_t enc_tx_dma_desc_size_max;
+	/*
+	 * Boundary which DMA descriptor data must not cross, or 0 if there is
+	 * no limitation.
+	 */
+	uint32_t		enc_tx_dma_desc_boundary;
+	/*
+	 * Maximum offset (in bytes) into the packet at which the TCP header
+	 * may start, for the hardware to apply TSO packet edits.
+	 */
+	uint32_t		enc_tx_tso_tcp_header_offset_limit;
+ boolean_t enc_fw_assisted_tso_enabled;
+ boolean_t enc_fw_assisted_tso_v2_enabled;
+ /* Number of TSO contexts on the NIC (FATSOv2) */
+ uint32_t enc_fw_assisted_tso_v2_n_contexts;
+ boolean_t enc_hw_tx_insert_vlan_enabled;
+ /* Number of PFs on the NIC */
+ uint32_t enc_hw_pf_count;
+ /* Datapath firmware vadapter/vport/vswitch support */
+ boolean_t enc_datapath_cap_evb;
+ boolean_t enc_rx_disable_scatter_supported;
+ boolean_t enc_allow_set_mac_with_installed_filters;
+ boolean_t enc_enhanced_set_mac_supported;
+ boolean_t enc_init_evq_v2_supported;
+ boolean_t enc_rx_packed_stream_supported;
+ boolean_t enc_rx_var_packed_stream_supported;
+ boolean_t enc_pm_and_rxdp_counters;
+ boolean_t enc_mac_stats_40g_tx_size_bins;
+ /* External port identifier */
+ uint8_t enc_external_port;
+ uint32_t enc_mcdi_max_payload_length;
+ /* VPD may be per-PF or global */
+ boolean_t enc_vpd_is_global;
+ /* Minimum unidirectional bandwidth in Mb/s to max out all ports */
+ uint32_t enc_required_pcie_bandwidth_mbps;
+ uint32_t enc_max_pcie_link_gen;
+ /* Firmware verifies integrity of NVRAM updates */
+ uint32_t enc_fw_verified_nvram_update_required;
+} efx_nic_cfg_t;
+
+#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
+#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff)
+
+#define EFX_PCI_FUNCTION(_encp) \
+ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf)
+
+#define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf)
+
+extern const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp);
+
+typedef struct efx_nic_fw_info_s {
+ /* Basic FW version information */
+ uint16_t enfi_mc_fw_version[4];
+	/*
+	 * If datapath capabilities can be detected, the additional
+	 * FW information below is valid
+	 */
+ boolean_t enfi_dpcpu_fw_ids_valid;
+ /* Rx and Tx datapath CPU FW IDs */
+ uint16_t enfi_rx_dpcpu_fw_id;
+ uint16_t enfi_tx_dpcpu_fw_id;
+} efx_nic_fw_info_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip);
+
+/* Driver resource limits (minimum required/maximum usable). */
+typedef struct efx_drv_limits_s {
+ uint32_t edl_min_evq_count;
+ uint32_t edl_max_evq_count;
+
+ uint32_t edl_min_rxq_count;
+ uint32_t edl_max_rxq_count;
+
+ uint32_t edl_min_txq_count;
+ uint32_t edl_max_txq_count;
+
+ /* PIO blocks (sub-allocated from piobuf) */
+ uint32_t edl_min_pio_alloc_size;
+ uint32_t edl_max_pio_alloc_count;
+} efx_drv_limits_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+typedef enum efx_nic_region_e {
+ EFX_REGION_VI, /* Memory BAR UC mapping */
+ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */
+} efx_nic_region_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp);
+
+
+#if EFSYS_OPT_VPD
+
+typedef enum efx_vpd_tag_e {
+ EFX_VPD_ID = 0x02,
+ EFX_VPD_END = 0x0f,
+ EFX_VPD_RO = 0x10,
+ EFX_VPD_RW = 0x11,
+} efx_vpd_tag_t;
+
+typedef uint16_t efx_vpd_keyword_t;
+
+typedef struct efx_vpd_value_s {
+ efx_vpd_tag_t evv_tag;
+ efx_vpd_keyword_t evv_keyword;
+ uint8_t evv_length;
+ uint8_t evv_value[0x100];
+} efx_vpd_value_t;
+
+
+#define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8))
+
+extern __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef enum efx_nvram_type_e {
+ EFX_NVRAM_INVALID = 0,
+ EFX_NVRAM_BOOTROM,
+ EFX_NVRAM_BOOTROM_CFG,
+ EFX_NVRAM_MC_FIRMWARE,
+ EFX_NVRAM_MC_GOLDEN,
+ EFX_NVRAM_PHY,
+ EFX_NVRAM_NULLPHY,
+ EFX_NVRAM_FPGA,
+ EFX_NVRAM_FCFW,
+ EFX_NVRAM_CPLD,
+ EFX_NVRAM_FPGA_BACKUP,
+ EFX_NVRAM_DYNAMIC_CFG,
+ EFX_NVRAM_LICENSE,
+ EFX_NVRAM_UEFIROM,
+ EFX_NVRAM_NTYPES,
+} efx_nvram_type_t;
+
+extern __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *pref_chunkp);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_nvram_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_BOOTCFG
+
+/* Report size and offset of bootcfg sector in NVRAM partition. */
+extern __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep);
+
+/*
+ * Copy bootcfg sector data to a target buffer, which may differ in size.
+ * Optionally corrects format errors in the source buffer.
+ */
+extern efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors);
+
+extern efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_DIAG
+
+typedef enum efx_pattern_type_t {
+ EFX_PATTERN_BYTE_INCREMENT = 0,
+ EFX_PATTERN_ALL_THE_SAME,
+ EFX_PATTERN_BIT_ALTERNATE,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_PATTERN_BIT_SWEEP,
+ EFX_PATTERN_NTYPES
+} efx_pattern_type_t;
+
+typedef void
+(*efx_sram_pattern_fn_t)(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp);
+
+extern __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n);
+
+extern void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n);
+
+#define EFX_BUF_TBL_SIZE 0x20000
+
+#define EFX_BUF_SIZE 4096
+
+/* EV */
+
+typedef struct efx_evq_s efx_evq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */
+typedef enum efx_ev_qstat_e {
+ EV_ALL,
+ EV_RX,
+ EV_RX_OK,
+ EV_RX_FRM_TRUNC,
+ EV_RX_TOBE_DISC,
+ EV_RX_PAUSE_FRM_ERR,
+ EV_RX_BUF_OWNER_ID_ERR,
+ EV_RX_IPV4_HDR_CHKSUM_ERR,
+ EV_RX_TCP_UDP_CHKSUM_ERR,
+ EV_RX_ETH_CRC_ERR,
+ EV_RX_IP_FRAG_ERR,
+ EV_RX_MCAST_PKT,
+ EV_RX_MCAST_HASH_MATCH,
+ EV_RX_TCP_IPV4,
+ EV_RX_TCP_IPV6,
+ EV_RX_UDP_IPV4,
+ EV_RX_UDP_IPV6,
+ EV_RX_OTHER_IPV4,
+ EV_RX_OTHER_IPV6,
+ EV_RX_NON_IP,
+ EV_RX_BATCH,
+ EV_TX,
+ EV_TX_WQ_FF_FULL,
+ EV_TX_PKT_ERR,
+ EV_TX_PKT_TOO_BIG,
+ EV_TX_UNEXPECTED,
+ EV_GLOBAL,
+ EV_GLOBAL_MNT,
+ EV_DRIVER,
+ EV_DRIVER_SRM_UPD_DONE,
+ EV_DRIVER_TX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_FAILED,
+ EV_DRIVER_RX_DSC_ERROR,
+ EV_DRIVER_TX_DSC_ERROR,
+ EV_DRV_GEN,
+ EV_MCDI_RESPONSE,
+ EV_NQSTATS
+} efx_ev_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_ev_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_EVQ_MAXNEVS 32768
+#define EFX_EVQ_MINNEVS 512
+
+#define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t))
+#define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE)
+
+#define EFX_EVQ_FLAGS_TYPE_MASK (0x3)
+#define EFX_EVQ_FLAGS_TYPE_AUTO (0x0)
+#define EFX_EVQ_FLAGS_TYPE_THROUGHPUT (0x1)
+#define EFX_EVQ_FLAGS_TYPE_LOW_LATENCY (0x2)
+
+#define EFX_EVQ_FLAGS_NOTIFY_MASK (0xC)
+#define EFX_EVQ_FLAGS_NOTIFY_INTERRUPT (0x0) /* Interrupting (default) */
+#define EFX_EVQ_FLAGS_NOTIFY_DISABLED (0x4) /* Non-interrupting */
+
+extern __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp);
+
+extern void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+typedef __checkReturn boolean_t
+(*efx_initialized_ev_t)(
+ __in_opt void *arg);
+
+#define EFX_PKT_UNICAST 0x0004
+#define EFX_PKT_START 0x0008
+
+#define EFX_PKT_VLAN_TAGGED 0x0010
+#define EFX_CKSUM_TCPUDP 0x0020
+#define EFX_CKSUM_IPV4 0x0040
+#define EFX_PKT_CONT 0x0080
+
+#define EFX_CHECK_VLAN 0x0100
+#define EFX_PKT_TCP 0x0200
+#define EFX_PKT_UDP 0x0400
+#define EFX_PKT_IPV4 0x0800
+
+#define EFX_PKT_IPV6 0x1000
+#define EFX_PKT_PREFIX_LEN 0x2000
+#define EFX_ADDR_MISMATCH 0x4000
+#define EFX_DISCARD 0x8000
+
+/*
+ * The following flags are used only for packed stream
+ * mode. The flag values are reused to fit into 16 bits,
+ * since EFX_PKT_START and EFX_PKT_CONT are never used in
+ * packed stream mode.
+ */
+#define EFX_PKT_PACKED_STREAM_NEW_BUFFER EFX_PKT_START
+#define EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE EFX_PKT_CONT
+
+
+#define EFX_EV_RX_NLABELS 32
+#define EFX_EV_TX_NLABELS 32
+
+typedef __checkReturn boolean_t
+(*efx_rx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t size,
+ __in uint16_t flags);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/*
+ * Packed stream mode is documented in SF-112241-TC.
+ * The general idea is that, instead of putting each incoming
+ * packet into a separate buffer which is specified in a RX
+ * descriptor, a large buffer is provided to the hardware and
+ * packets are put there in a continuous stream.
+ * The main advantage of such an approach is that RX queue refilling
+ * happens much less frequently.
+ */
+
+typedef __checkReturn boolean_t
+(*efx_rx_ps_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t pkt_count,
+ __in uint16_t flags);
+
+#endif
+
+typedef __checkReturn boolean_t
+(*efx_tx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id);
+
+#define EFX_EXCEPTION_RX_RECOVERY 0x00000001
+#define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002
+#define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003
+#define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004
+#define EFX_EXCEPTION_FWALERT_SRAM 0x00000005
+#define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006
+#define EFX_EXCEPTION_RX_ERROR 0x00000007
+#define EFX_EXCEPTION_TX_ERROR 0x00000008
+#define EFX_EXCEPTION_EV_ERROR 0x00000009
+
+typedef __checkReturn boolean_t
+(*efx_exception_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t data);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_failed_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_txq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t txq_index);
+
+typedef __checkReturn boolean_t
+(*efx_software_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t magic);
+
+typedef __checkReturn boolean_t
+(*efx_sram_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t code);
+
+#define EFX_SRAM_CLEAR 0
+#define EFX_SRAM_UPDATE 1
+#define EFX_SRAM_ILLEGAL_CLEAR 2
+
+typedef __checkReturn boolean_t
+(*efx_wake_up_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_timer_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_link_change_ev_t)(
+ __in_opt void *arg,
+ __in efx_link_mode_t link_mode);
+
+#if EFSYS_OPT_MON_STATS
+
+typedef __checkReturn boolean_t
+(*efx_monitor_ev_t)(
+ __in_opt void *arg,
+ __in efx_mon_stat_t id,
+ __in efx_mon_stat_value_t value);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef __checkReturn boolean_t
+(*efx_mac_stats_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t generation
+ );
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef struct efx_ev_callbacks_s {
+ efx_initialized_ev_t eec_initialized;
+ efx_rx_ev_t eec_rx;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ efx_rx_ps_ev_t eec_rx_ps;
+#endif
+ efx_tx_ev_t eec_tx;
+ efx_exception_ev_t eec_exception;
+ efx_rxq_flush_done_ev_t eec_rxq_flush_done;
+ efx_rxq_flush_failed_ev_t eec_rxq_flush_failed;
+ efx_txq_flush_done_ev_t eec_txq_flush_done;
+ efx_software_ev_t eec_software;
+ efx_sram_ev_t eec_sram;
+ efx_wake_up_ev_t eec_wake_up;
+ efx_timer_ev_t eec_timer;
+ efx_link_change_ev_t eec_link_change;
+#if EFSYS_OPT_MON_STATS
+ efx_monitor_ev_t eec_monitor;
+#endif /* EFSYS_OPT_MON_STATS */
+#if EFSYS_OPT_MAC_STATS
+ efx_mac_stats_ev_t eec_mac_stats;
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_ev_callbacks_t;
+
+extern __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_EV_PREFETCH
+
+extern void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+extern void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
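
The poll interface above is callback driven: the client fills an efx_ev_callbacks_t with handlers and passes it to efx_ev_qpoll() together with a cursor that the library advances. The fragment below is an illustrative client-side sketch only, not part of this header; the struct my_evq wrapper and handler names are hypothetical, and a production driver would populate every callback its hardware can raise.

/*
 * Illustrative sketch, assuming a hypothetical wrapper around the EVQ.
 */
struct my_evq {
	efx_evq_t *eep;
	unsigned int count;	/* poll cursor advanced by efx_ev_qpoll() */
};

static boolean_t
my_rx_ev(void *arg, uint32_t label, uint32_t id, uint32_t size, uint16_t flags)
{
	(void)arg; (void)label; (void)id; (void)size; (void)flags;
	return (B_FALSE);	/* B_FALSE: do not abort the poll loop */
}

static void
my_poll(struct my_evq *evq)
{
	efx_ev_callbacks_t eec;

	(void) memset(&eec, 0, sizeof (eec));
	eec.eec_rx = my_rx_ev;	/* remaining callbacks omitted for brevity */

	efx_ev_qpoll(evq->eep, &evq->count, &eec, evq);
}
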
+
+extern __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int usecs,
+ __out unsigned int *ticksp);
+
+extern __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+extern __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp);
+
+extern void
+efx_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+
+typedef enum efx_rx_hash_alg_e {
+ EFX_RX_HASHALG_LFSR = 0,
+ EFX_RX_HASHALG_TOEPLITZ
+} efx_rx_hash_alg_t;
+
+#define EFX_RX_HASH_IPV4 (1U << 0)
+#define EFX_RX_HASH_TCPIPV4 (1U << 1)
+#define EFX_RX_HASH_IPV6 (1U << 2)
+#define EFX_RX_HASH_TCPIPV6 (1U << 3)
+
+typedef unsigned int efx_rx_hash_type_t;
+
+typedef enum efx_rx_hash_support_e {
+ EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */
+ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */
+} efx_rx_hash_support_t;
+
+#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */
+#define EFX_MAXRSS 64 /* RX indirection entry range */
+#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */
+
+typedef enum efx_rx_scale_support_e {
+ EFX_RX_SCALE_UNAVAILABLE = 0, /* Not supported */
+ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */
+ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
+} efx_rx_scale_support_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp);
+
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
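
A typical RSS bring-up with the declarations above selects a hash algorithm and flow types, programs the key, and fills the 128-entry indirection table. The sketch below is illustrative only; the key buffer and the number of RX queues are assumptions of the example, not values mandated by the API.

/* Illustrative RSS bring-up sketch; rss_key and nb_rxq are assumptions. */
static efx_rc_t
my_rss_setup(efx_nic_t *enp, uint8_t *rss_key, size_t key_len,
	unsigned int nb_rxq)
{
	unsigned int tbl[EFX_RSS_TBL_SIZE];
	unsigned int i;
	efx_rc_t rc;

	rc = efx_rx_scale_mode_set(enp, EFX_RX_HASHALG_TOEPLITZ,
	    EFX_RX_HASH_IPV4 | EFX_RX_HASH_TCPIPV4, B_TRUE);
	if (rc != 0)
		return (rc);

	rc = efx_rx_scale_key_set(enp, rss_key, key_len);
	if (rc != 0)
		return (rc);

	/* Spread the 128-entry indirection table over the RX queues in use. */
	for (i = 0; i < EFX_RSS_TBL_SIZE; i++)
		tbl[i] = i % nb_rxq;

	return (efx_rx_scale_tbl_set(enp, tbl, EFX_RSS_TBL_SIZE));
}
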
+
+extern __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *pkt_lengthp);
+
+#define EFX_RXQ_MAXNDESCS 4096
+#define EFX_RXQ_MINNDESCS 512
+
+#define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
+
+typedef enum efx_rxq_type_e {
+ EFX_RXQ_TYPE_DEFAULT,
+ EFX_RXQ_TYPE_SCATTER,
+ EFX_RXQ_TYPE_PACKED_STREAM_1M,
+ EFX_RXQ_TYPE_PACKED_STREAM_512K,
+ EFX_RXQ_TYPE_PACKED_STREAM_256K,
+ EFX_RXQ_TYPE_PACKED_STREAM_128K,
+ EFX_RXQ_TYPE_PACKED_STREAM_64K,
+ EFX_RXQ_NTYPES
+} efx_rxq_type_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+typedef struct efx_buffer_s {
+ efsys_dma_addr_t eb_addr;
+ size_t eb_size;
+ boolean_t eb_eop;
+} efx_buffer_t;
+
+typedef struct efx_desc_s {
+ efx_qword_t ed_eq;
+} efx_desc_t;
+
+extern void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/*
+ * Fake length for RXQ descriptors in packed stream mode
+ * to make hardware happy
+ */
+#define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
+
+extern void
+efx_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
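
In packed stream mode the consumer walks one huge buffer with efx_rx_qps_packet_info() and periodically returns credits with efx_rx_qps_update_credits(). The loop below is only a sketch of that walk; the buffer pointer, its length and the packet count (as delivered via an efx_rx_ps_ev_t event) are assumed to come from the caller's own bookkeeping.

/* Illustrative packed stream consumption sketch (caller-supplied state). */
static void
my_ps_consume(efx_rxq_t *erp, uint8_t *buf, uint32_t buf_len, uint32_t pkts)
{
	uint32_t offset = 0;
	uint32_t next_offset;
	uint32_t timestamp;
	uint16_t len;
	uint8_t *pkt;

	while (pkts-- > 0) {
		pkt = efx_rx_qps_packet_info(erp, buf, buf_len, offset,
		    &len, &next_offset, &timestamp);
		/* hand (pkt, len) to the stack here */
		(void)pkt;
		offset = next_offset;
	}

	/* Return credits so the hardware can reuse the consumed space. */
	efx_rx_qps_update_credits(erp);
}
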
+
+extern __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+/* TX */
+
+typedef struct efx_txq_s efx_txq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */
+typedef enum efx_tx_qstat_e {
+ TX_POST,
+ TX_POST_PIO,
+ TX_NQSTATS
+} efx_tx_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tx_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_TXQ_MINNDESCS 512
+
+#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
+
+#define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */
+
+#define EFX_TXQ_CKSUM_IPV4 0x0001
+#define EFX_TXQ_CKSUM_TCPUDP 0x0002
+#define EFX_TXQ_FATSOV2 0x0004
+
+extern __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+extern __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+/* Number of FATSOv2 option descriptors */
+#define EFX_TX_FATSOV2_OPT_NDESCS 2
+
+/* Maximum number of DMA segments per TSO packet (not superframe) */
+#define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24
+
+extern void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp);
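
The *_qdesc_* helpers above build descriptors into caller memory, which are then published in one batch with efx_tx_qdesc_post() and a doorbell write via efx_tx_qpush(). A hedged sketch of sending a single non-TSO fragment follows; the completed/added bookkeeping, and the assumption that the "pushed" argument is the previously added count, belong to the example rather than to this header.

/* Illustrative single-fragment transmit sketch (bookkeeping assumed). */
static efx_rc_t
my_tx_one(efx_txq_t *etp, efsys_dma_addr_t addr, size_t len,
	unsigned int completed, unsigned int *addedp)
{
	efx_desc_t desc;
	unsigned int pushed = *addedp;
	efx_rc_t rc;

	/* One DMA descriptor carrying the whole packet (eop = B_TRUE). */
	efx_tx_qdesc_dma_create(etp, addr, len, B_TRUE, &desc);

	rc = efx_tx_qdesc_post(etp, &desc, 1, completed, addedp);
	if (rc != 0)
		return (rc);

	/* Ring the doorbell for the newly added descriptor. */
	efx_tx_qpush(etp, *addedp, pushed);
	return (0);
}
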
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *etp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+
+/* FILTER */
+
+#if EFSYS_OPT_FILTER
+
+#define EFX_ETHER_TYPE_IPV4 0x0800
+#define EFX_ETHER_TYPE_IPV6 0x86DD
+
+#define EFX_IPPROTO_TCP 6
+#define EFX_IPPROTO_UDP 17
+
+/* Use RSS to spread across multiple queues */
+#define EFX_FILTER_FLAG_RX_RSS 0x01
+/* Enable RX scatter */
+#define EFX_FILTER_FLAG_RX_SCATTER 0x02
+/*
+ * Override an automatic filter (priority EFX_FILTER_PRI_AUTO).
+ * May only be set by the filter implementation for each type.
+ * A removal request will restore the automatic filter in its place.
+ */
+#define EFX_FILTER_FLAG_RX_OVER_AUTO 0x04
+/* Filter is for RX */
+#define EFX_FILTER_FLAG_RX 0x08
+/* Filter is for TX */
+#define EFX_FILTER_FLAG_TX 0x10
+
+typedef unsigned int efx_filter_flags_t;
+
+typedef enum efx_filter_match_flags_e {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host
+ * address */
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host
+ * address */
+ EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */
+ EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */
+	EFX_FILTER_MATCH_LOC_MAC = 0x0010,	/* Match by local MAC address */
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */
+ EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport
+ * protocol */
+ /* Match otherwise-unmatched multicast and broadcast packets */
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST = 0x40000000,
+ /* Match otherwise-unmatched unicast packets */
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST = 0x80000000,
+} efx_filter_match_flags_t;
+
+typedef enum efx_filter_priority_s {
+ EFX_FILTER_PRI_HINT = 0, /* Performance hint */
+ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device
+ * address list or hardware
+ * requirements. This may only be used
+ * by the filter implementation for
+ * each NIC type. */
+ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */
+ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the
+ * client (e.g. SR-IOV, HyperV VMQ etc.)
+ */
+} efx_filter_priority_t;
+
+/*
+ * FIXME: All these fields are assumed to be in little-endian byte order.
+ * It may be better for some to be big-endian. See bug42804.
+ */
+
+typedef struct efx_filter_spec_s {
+ uint32_t efs_match_flags;
+ uint32_t efs_priority:2;
+ uint32_t efs_flags:6;
+ uint32_t efs_dmaq_id:12;
+ uint32_t efs_rss_context;
+ uint16_t efs_outer_vid;
+ uint16_t efs_inner_vid;
+ uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
+ uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
+ uint16_t efs_ether_type;
+ uint8_t efs_ip_proto;
+ uint16_t efs_loc_port;
+ uint16_t efs_rem_port;
+ efx_oword_t efs_rem_host;
+ efx_oword_t efs_loc_host;
+} efx_filter_spec_t;
+
+
+/* Default values for use in filter specifications */
+#define EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT 0xffffffff
+#define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff
+#define EFX_FILTER_SPEC_VID_UNSPEC 0xffff
+
+extern __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_filter_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp);
+
+extern void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec);
+
+#endif /* EFSYS_OPT_FILTER */
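
Filter specifications are built by calling one of the efx_filter_spec_init_*() helpers, narrowing the match with a setter, and then calling efx_filter_insert(). The sketch below steers one local IPv4 TCP endpoint to an RX queue; the function name is hypothetical and host/port are expected in the byte order noted in the FIXME above.

/* Illustrative RX steering filter sketch. */
static efx_rc_t
my_steer_tcp(efx_nic_t *enp, efx_rxq_t *erp, uint32_t host, uint16_t port)
{
	efx_filter_spec_t spec;
	efx_rc_t rc;

	efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, erp);

	rc = efx_filter_spec_set_ipv4_local(&spec, EFX_IPPROTO_TCP,
	    host, port);
	if (rc != 0)
		return (rc);

	return (efx_filter_insert(enp, &spec));
}
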
+
+/* HASH */
+
+extern __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init);
+
+extern __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init);
+
+#if EFSYS_OPT_LICENSING
+
+/* LICENSING */
+
+typedef struct efx_key_stats_s {
+ uint32_t eks_valid;
+ uint32_t eks_invalid;
+ uint32_t eks_blacklisted;
+ uint32_t eks_unverifiable;
+ uint32_t eks_wrong_node;
+ uint32_t eks_licensed_apps_lo;
+ uint32_t eks_licensed_apps_hi;
+ uint32_t eks_licensed_features_lo;
+ uint32_t eks_licensed_features_hi;
+} efx_key_stats_t;
+
+extern __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_lic_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *ksp);
+
+extern __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp);
+
+
+extern __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_LICENSING */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_bootcfg.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_bootcfg.c
new file mode 100644
index 00000000..d589c86a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_bootcfg.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_BOOTCFG
+
+/*
+ * Maximum size of BOOTCFG block across all NICs as understood by SFCgPXE.
+ * NOTE: This is larger than the Medford per-PF bootcfg sector.
+ */
+#define BOOTCFG_MAX_SIZE 0x1000
+
+/* Medford per-PF bootcfg sector */
+#define BOOTCFG_PER_PF 0x800
+#define BOOTCFG_PF_COUNT 16
+
+#define DHCP_END ((uint8_t)0xff)
+#define DHCP_PAD ((uint8_t)0)
+
+
+/* Report the layout of bootcfg sectors in NVRAM partition. */
+ __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep)
+{
+ uint32_t count;
+ size_t max_size;
+ size_t offset;
+ int rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD: {
+ /* Shared partition (array indexed by PF) */
+ max_size = BOOTCFG_PER_PF;
+ count = BOOTCFG_PF_COUNT;
+ if (pf >= count) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ offset = max_size * pf;
+ break;
+ }
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(max_size, <=, BOOTCFG_MAX_SIZE);
+
+ if (sector_countp != NULL)
+ *sector_countp = count;
+ *offsetp = offset;
+ *max_sizep = max_size;
+
+ return (0);
+
+#if EFSYS_OPT_MEDFORD
+fail2:
+ EFSYS_PROBE(fail2);
+#endif
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+
+static __checkReturn uint8_t
+efx_bootcfg_csum(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ unsigned int pos;
+ uint8_t checksum = 0;
+
+ for (pos = 0; pos < size; pos++)
+ checksum += data[pos];
+ return (checksum);
+}
+
+static __checkReturn efx_rc_t
+efx_bootcfg_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size,
+ __out_opt size_t *usedp)
+{
+ size_t offset = 0;
+ size_t used = 0;
+ efx_rc_t rc;
+
+ /* Start parsing tags immediately after the checksum */
+ for (offset = 1; offset < size; ) {
+ uint8_t tag;
+ uint8_t length;
+
+ /* Consume tag */
+ tag = data[offset];
+ if (tag == DHCP_END) {
+ offset++;
+ used = offset;
+ break;
+ }
+ if (tag == DHCP_PAD) {
+ offset++;
+ continue;
+ }
+
+ /* Consume length */
+ if (offset + 1 >= size) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ length = data[offset + 1];
+
+ /* Consume *length */
+ if (offset + 1 + length >= size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ offset += 2 + length;
+ used = offset;
+ }
+
+ /* Checksum the entire sector, including bytes after any DHCP_END */
+ if (efx_bootcfg_csum(enp, data, size) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (usedp != NULL)
+ *usedp = used;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
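
For orientation, the sector layout this parser accepts, as inferred from the loop above and the checksum routine (so treat it as descriptive rather than normative), is:

/*
 *  byte 0     checksum, chosen so the whole sector sums to 0 mod 256
 *  byte 1..   DHCP-style options: { tag, length, value[length] }
 *             DHCP_PAD (0x00) is a single filler byte with no length,
 *             DHCP_END (0xff) terminates the used region of the sector
 */
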
+
+/*
+ * Copy bootcfg sector data to a target buffer which may differ in size.
+ * Optionally corrects format errors in source buffer.
+ */
+ efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors)
+{
+ size_t used_bytes;
+ efx_rc_t rc;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, sector, sector_length,
+ &used_bytes);
+
+ if (!handle_format_errors) {
+ if (rc != 0)
+ goto fail1;
+
+ if ((used_bytes < 2) ||
+ (sector[used_bytes - 1] != DHCP_END)) {
+ /* Block too short, or DHCP_END missing */
+ rc = ENOENT;
+ goto fail2;
+ }
+ }
+
+ /* Synthesize empty format on verification failure */
+ if (rc != 0 || used_bytes == 0) {
+ sector[0] = 0;
+ sector[1] = DHCP_END;
+ used_bytes = 2;
+ }
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+ EFSYS_ASSERT(sector_length >= 2);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does.
+ * Reinitialise the sector if there isn't room for the character.
+ */
+ if (sector[used_bytes - 1] != DHCP_END) {
+ if (used_bytes >= sector_length) {
+ sector[0] = 0;
+ used_bytes = 1;
+ }
+ sector[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the target buffer is large enough for the
+ * entire used bootcfg area, then copy into the target buffer.
+ */
+ if (used_bytes > data_size) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ memcpy(data, sector, used_bytes);
+
+ /* Zero out the unused portion of the target buffer */
+ if (used_bytes < data_size)
+ (void) memset(data + used_bytes, 0, data_size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, data_size);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *payload = NULL;
+ size_t used_bytes;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail1;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail2;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /*
+ * We need to read the entire BOOTCFG sector to ensure we read all the
+ * tags, because legacy bootcfg sectors are not guaranteed to end with
+ * a DHCP_END character. If the user hasn't supplied a sufficiently
+ * large buffer then use our own buffer.
+ */
+ if (sector_length > size) {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ } else
+ payload = (uint8_t *)data;
+
+ if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail5;
+
+ if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ sector_offset, (caddr_t)payload, sector_length)) != 0) {
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+ goto fail6;
+ }
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail7;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length,
+ &used_bytes);
+ if (rc != 0 || used_bytes == 0) {
+ payload[0] = (uint8_t)~DHCP_END;
+ payload[1] = DHCP_END;
+ used_bytes = 2;
+ }
+
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by
+ * definition large enough for any valid (per-port) bootcfg sector,
+ * so reinitialise the sector if there isn't room for the character.
+ */
+ if (payload[used_bytes - 1] != DHCP_END) {
+ if (used_bytes + 1 > sector_length) {
+ payload[0] = 0;
+ used_bytes = 1;
+ }
+
+ payload[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the user supplied buffer is large enough for the
+ * entire used bootcfg area, then copy into the user supplied buffer.
+ */
+ if (used_bytes > size) {
+ rc = ENOSPC;
+ goto fail8;
+ }
+ if (sector_length > size) {
+ memcpy(data, payload, used_bytes);
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+ }
+
+ /* Zero out the unused portion of the user buffer */
+ if (used_bytes < size)
+ (void) memset(data + used_bytes, 0, size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, size);
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *partn_data;
+ uint8_t checksum;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ size_t used_bytes;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail1;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail2;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
+ goto fail4;
+
+ /* The caller *must* terminate their block with a DHCP_END character */
+ if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] != DHCP_END)) {
+ /* Block too short or DHCP_END missing */
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ /* Check that the hardware has support for this much data */
+ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /*
+ * If the BOOTCFG sector is stored in a shared partition, then we must
+ * read the whole partition and insert the updated bootcfg sector at the
+ * correct offset.
+ */
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_length, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail7;
+ }
+
+ rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
+ if (rc != 0)
+ goto fail8;
+
+ /* Read the entire partition */
+ rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0,
+ (caddr_t)partn_data, partn_length);
+ if (rc != 0)
+ goto fail9;
+
+ /*
+ * Insert the BOOTCFG sector into the partition, zero out all data after
+ * the DHCP_END tag, and adjust the checksum.
+ */
+ (void) memset(partn_data + sector_offset, 0x0, sector_length);
+ (void) memcpy(partn_data + sector_offset, data, used_bytes);
+
+ checksum = efx_bootcfg_csum(enp, data, used_bytes);
+ partn_data[sector_offset] -= checksum;
+
+ if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail10;
+
+ if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ 0, (caddr_t)partn_data, partn_length)) != 0)
+ goto fail11;
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail12;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+fail8:
+ EFSYS_PROBE(fail8);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BOOTCFG */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_check.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_check.h
new file mode 100644
index 00000000..c8548c04
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_check.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_CHECK_H
+#define _SYS_EFX_CHECK_H
+
+#include "efsys.h"
+
+/*
+ * Check that the efsys.h header in client code has a valid combination of
+ * EFSYS_OPT_xxx options.
+ *
+ * NOTE: Keep checks for obsolete options here to ensure that they are removed
+ * from client code (and do not reappear in merges from other branches).
+ */
+
+#ifdef EFSYS_OPT_FALCON
+# error "FALCON is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_BOOTCFG
+/* Support NVRAM based boot config */
+# if !EFSYS_OPT_NVRAM
+# error "BOOTCFG requires NVRAM"
+# endif
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_CHECK_REG
+/* Verify chip implements accessed registers */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_CHECK_REG */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+/* Decode fatal errors */
+# if !EFSYS_OPT_SIENA
+# error "INTR_FATAL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_DECODE_INTR_FATAL */
+
+#if EFSYS_OPT_DIAG
+/* Support diagnostic hardware tests */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_EV_PREFETCH
+/* Support optimized EVQ data access */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
+# error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_FILTER
+/* Support hardware packet filters */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_FILTER */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_FILTER
+# error "HUNTINGTON or MEDFORD requires FILTER"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_LOOPBACK
+/* Support hardware loopback modes */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#ifdef EFSYS_OPT_MAC_FALCON_GMAC
+# error "MAC_FALCON_GMAC is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MAC_FALCON_XMAC
+# error "MAC_FALCON_XMAC is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+/* Support MAC statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_MCDI
+/* Support management controller messages */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MCDI */
+
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_MCDI
+# error "SIENA or HUNTINGTON or MEDFORD requires MCDI"
+# endif
+#endif
+
+#if EFSYS_OPT_MCDI_LOGGING
+/* Support MCDI logging */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_LOGGING requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+/* Support MCDI proxy authorization */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_PROXY_AUTH requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#ifdef EFSYS_OPT_MON_LM87
+# error "MON_LM87 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_MAX6647
+# error "MON_MAX6647 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_NULL
+# error "MON_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_SIENA
+# error "MON_SIENA is obsolete (replaced by MON_MCDI)."
+#endif
+
+#ifdef EFSYS_OPT_MON_HUNTINGTON
+# error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)."
+#endif
+
+#if EFSYS_OPT_MON_STATS
+/* Support monitor statistics (voltage/temperature) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MON_MCDI
+/* Support monitor via MCDI */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_MCDI */
+
+#if EFSYS_OPT_NAMES
+/* Support printable names for statistics */
+# if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \
+	EFSYS_OPT_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS)
+# error "NAMES requires LOOPBACK or xxxSTATS or MCDI"
+# endif
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_NVRAM
+/* Support non volatile configuration */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_NVRAM */
+
+#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
+# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFT9001
+# error "NVRAM_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFX7101
+# error "NVRAM_SFX7101 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PCIE_TUNE
+# error "PCIE_TUNE is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_BIST
+# error "PHY_BIST is obsolete (replaced by BIST)."
+#endif
+
+#if EFSYS_OPT_PHY_FLAGS
+/* Support PHY flags */
+# if !EFSYS_OPT_SIENA
+# error "PHY_FLAGS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+/* Support for PHY LED control */
+# if !EFSYS_OPT_SIENA
+# error "PHY_LED_CONTROL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+#ifdef EFSYS_OPT_PHY_NULL
+# error "PHY_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PM8358
+# error "PHY_PM8358 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PROPS
+# error "PHY_PROPS is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2022C2
+# error "PHY_QT2022C2 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2025C
+# error "PHY_QT2025C is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFT9001
+# error "PHY_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFX7101
+# error "PHY_SFX7101 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_PHY_STATS
+/* Support PHY statistics */
+# if !EFSYS_OPT_SIENA
+# error "PHY_STATS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#ifdef EFSYS_OPT_PHY_TXC43128
+# error "PHY_TXC43128 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_QSTATS
+/* Support EVQ/RXQ/TXQ statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_QSTATS */
+
+#ifdef EFSYS_OPT_RX_HDR_SPLIT
+# error "RX_HDR_SPLIT is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+/* Support receive scaling (RSS) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCATTER
+/* Support receive scatter DMA */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#ifdef EFSYS_OPT_STAT_NAME
+# error "STAT_NAME is obsolete (replaced by NAMES)."
+#endif
+
+#if EFSYS_OPT_VPD
+/* Support PCI Vital Product Data (VPD) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_VPD */
+
+#ifdef EFSYS_OPT_WOL
+# error "WOL is obsolete and is not supported"
+#endif /* EFSYS_OPT_WOL */
+
+#ifdef EFSYS_OPT_MCAST_FILTER_LIST
+# error "MCAST_FILTER_LIST is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_BIST
+/* Support BIST */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_BIST */
+
+#if EFSYS_OPT_LICENSING
+/* Support MCDI licensing API */
+# if !EFSYS_OPT_MCDI
+# error "LICENSING requires MCDI"
+# endif
+# if !EFSYS_HAS_UINT64
+# error "LICENSING requires UINT64"
+# endif
+#endif /* EFSYS_OPT_LICENSING */
+
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+/* Support adapters with missing static config (for factory use only) */
+# if !EFSYS_OPT_MEDFORD
+# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD"
+# endif
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+/* Support packed stream mode */
+# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "PACKED_STREAM requires HUNTINGTON or MEDFORD"
+# endif
+#endif
+
+#endif /* _SYS_EFX_CHECK_H */
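
Taken together, the dependency rules in this header constrain what a client's efsys.h must enable. A hedged example of a combination that satisfies the checks for an EF10-only (Huntington/Medford) build is shown below; a real driver's option set is its own decision and typically defines many more EFSYS_OPT_* symbols.

/* Illustrative efsys.h fragment satisfying the checks above (EF10 only). */
#define EFSYS_OPT_SIENA		0
#define EFSYS_OPT_HUNTINGTON	1
#define EFSYS_OPT_MEDFORD	1
#define EFSYS_OPT_MCDI		1	/* required by HUNTINGTON/MEDFORD */
#define EFSYS_OPT_FILTER	1	/* required by HUNTINGTON/MEDFORD */
#define EFSYS_OPT_NVRAM		1
#define EFSYS_OPT_BOOTCFG	1	/* BOOTCFG requires NVRAM */
#define EFSYS_OPT_RX_SCALE	1
#define EFSYS_OPT_DIAG		0
#define EFSYS_OPT_NAMES		0
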
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_crc32.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_crc32.c
new file mode 100644
index 00000000..27e2708a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_crc32.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/*
+ * Precomputed table for computing IEEE 802.3 CRC32
+ * with polynomial 0x04c11db7 (bit-reversed 0xedb88320)
+ */
+
+static const uint32_t efx_crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+ __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length)
+{
+ int index;
+ uint32_t crc = crc_init;
+
+ for (index = 0; index < length; index++) {
+ uint32_t data = *(input++);
+ crc = (crc >> 8) ^ efx_crc32_table[(crc ^ data) & 0xff];
+ }
+
+ return (crc);
+}
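
As the comment above notes, this routine is used to hash a MAC address (for example when building multicast hash filters). A minimal usage sketch, assuming the conventional all-ones initial CRC value rather than anything this file mandates:

/* Illustrative use: CRC32 of a 6-byte MAC address, seeded with ~0. */
static uint32_t
my_mac_crc(const uint8_t *mac /* EFX_MAC_ADDR_LEN == 6 bytes */)
{
	return (efx_crc32_calculate(0xffffffff, mac, 6));
}
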
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_ev.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_ev.c
new file mode 100644
index 00000000..42ded5aa
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_ev.c
@@ -0,0 +1,1470 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+#define EFX_EV_PRESENT(_qword) \
+ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
+ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
+
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_ev_ops_t __efx_ev_siena_ops = {
+ siena_ev_init, /* eevo_init */
+ siena_ev_fini, /* eevo_fini */
+ siena_ev_qcreate, /* eevo_qcreate */
+ siena_ev_qdestroy, /* eevo_qdestroy */
+ siena_ev_qprime, /* eevo_qprime */
+ siena_ev_qpost, /* eevo_qpost */
+ siena_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ siena_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_ev_ops_t __efx_ev_ef10_ops = {
+ ef10_ev_init, /* eevo_init */
+ ef10_ev_fini, /* eevo_fini */
+ ef10_ev_qcreate, /* eevo_qcreate */
+ ef10_ev_qdestroy, /* eevo_qdestroy */
+ ef10_ev_qprime, /* eevo_qprime */
+ ef10_ev_qpost, /* eevo_qpost */
+ ef10_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ ef10_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (enp->en_mod_flags & EFX_MOD_EV) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eevop = &__efx_ev_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ if ((rc = eevop->eevo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_eevop = eevop;
+ enp->en_mod_flags |= EFX_MOD_EV;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+ return (rc);
+}
+
+ void
+efx_ev_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ eevop->eevo_fini(enp);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+}
+
+
+ __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_evq_t *eep;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
+
+ switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
+ case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
+ break;
+ case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
+ if (us != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Allocate an EVQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
+ if (eep == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ eep->ee_magic = EFX_EVQ_MAGIC;
+ eep->ee_enp = enp;
+ eep->ee_index = index;
+ eep->ee_mask = n - 1;
+ eep->ee_flags = flags;
+ eep->ee_esmp = esmp;
+
+ /*
+ * Set outputs before the queue is created because interrupts may be
+ * raised for events immediately after the queue is created, before the
+ * function call below returns. See bug58606.
+ *
+ * The eepp pointer passed in by the client must therefore point to data
+ * shared with the client's event processing context.
+ */
+ enp->en_ev_qcount++;
+ *eepp = eep;
+
+ if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, flags,
+ eep)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ *eepp = NULL;
+ enp->en_ev_qcount--;
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_ev_qcount != 0);
+ --enp->en_ev_qcount;
+
+ eevop->eevo_qdestroy(eep);
+
+ /* Free the EVQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qprime(eep, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ size_t offset;
+ efx_qword_t qword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
+
+ return (EFX_EV_PRESENT(qword));
+}
+
+#if EFSYS_OPT_EV_PREFETCH
+
+ void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ unsigned int offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+}
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#define EFX_EV_BATCH 8
+
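+/*
+ * Poll an event queue, dispatching each event to the matching callback in
+ * *eecp.  Illustrative caller sketch only (the callback table and argument
+ * names are hypothetical):
+ *
+ *	unsigned int count = 0;
+ *
+ *	(void) efx_ev_qprime(eep, count);
+ *	... wait for an interrupt or timeout ...
+ *	efx_ev_qpoll(eep, &count, &my_callbacks, my_arg);
+ *	(void) efx_ev_qprime(eep, count);
+ */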
+ void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_qword_t ev[EFX_EV_BATCH];
+ unsigned int batch;
+ unsigned int total;
+ unsigned int count;
+ unsigned int index;
+ size_t offset;
+
+	/* Ensure event codes match for EF10 (Huntington/Medford) and Siena */
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
+
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
+ FSE_AZ_EV_CODE_DRV_GEN_EV);
+#if EFSYS_OPT_MCDI
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
+ FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
+#endif
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+ EFSYS_ASSERT(countp != NULL);
+ EFSYS_ASSERT(eecp != NULL);
+
+ count = *countp;
+ do {
+ /* Read up until the end of the batch period */
+ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (total = 0; total < batch; ++total) {
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
+
+ if (!EFX_EV_PRESENT(ev[total]))
+ break;
+
+ EFSYS_PROBE3(event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
+
+ offset += sizeof (efx_qword_t);
+ }
+
+#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
+ /*
+ * Prefetch the next batch when we get within PREFETCH_PERIOD
+ * of a completed batch. If the batch is smaller, then prefetch
+ * immediately.
+ */
+ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ /* Process the batch of events */
+ for (index = 0; index < total; ++index) {
+ boolean_t should_abort;
+ uint32_t code;
+
+#if EFSYS_OPT_EV_PREFETCH
+ /* Prefetch if we've now reached the batch period */
+ if (total == batch &&
+ index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
+ offset = (count + batch) & eep->ee_mask;
+ offset *= sizeof (efx_qword_t);
+
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+ }
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ EFX_EV_QSTAT_INCR(eep, EV_ALL);
+
+ code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
+ switch (code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ should_abort = eep->ee_rx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ should_abort = eep->ee_tx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ should_abort = eep->ee_driver(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ should_abort = eep->ee_drv_gen(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#if EFSYS_OPT_MCDI
+ case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
+ should_abort = eep->ee_mcdi(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#endif
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (eep->ee_global) {
+ should_abort = eep->ee_global(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ }
+ /* else fallthrough */
+ default:
+ EFSYS_PROBE3(bad_event,
+ unsigned int, eep->ee_index,
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ (void) eecp->eec_exception(arg,
+ EFX_EXCEPTION_EV_ERROR, code);
+ should_abort = B_TRUE;
+ }
+ if (should_abort) {
+ /* Ignore subsequent events */
+ total = index + 1;
+ break;
+ }
+ }
+
+	/*
+	 * Now that the hardware has most likely moved on to DMAing into
+	 * the next cache line, clear the processed events. Take care to
+	 * only clear out events that we've processed.
+	 */
+ EFX_SET_QWORD(ev[0]);
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (index = 0; index < total; ++index) {
+ EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
+ offset += sizeof (efx_qword_t);
+ }
+
+ count += total;
+
+ } while (total == batch);
+
+ *countp = count;
+}
+
+ void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(eevop != NULL &&
+ eevop->eevo_qpost != NULL);
+
+ eevop->eevo_qpost(eep, data);
+}
+
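+/*
+ * For example, with a (hypothetical) 5000 ns timer quantum, 20 us maps to
+ * 20 * 1000 / 5000 = 4 ticks, and any non-zero request shorter than one
+ * quantum is rounded up to a single tick.
+ */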
+ __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int us,
+ __out unsigned int *ticksp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int ticks;
+
+ /* Convert microseconds to a timer tick count */
+ if (us == 0)
+ ticks = 0;
+ else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
+ ticks = 1; /* Never round down to zero */
+ else
+ ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
+
+ *ticksp = ticks;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+	efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ eevop->eevo_qstats_update(eep, stat);
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Program the event queue for receive and transmit queue
+ * flush events.
+ */
+ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
+
+ return (0);
+
+}
+
+static __checkReturn boolean_t
+siena_ev_rx_not_ok(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in uint32_t label,
+ __in uint32_t id,
+ __inout uint16_t *flagsp)
+{
+ boolean_t ignore = B_FALSE;
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
+ EFSYS_PROBE(tobe_disc);
+ /*
+		 * Assume this is a unicast address mismatch, unless we find
+		 * below that either FSF_AZ_RX_EV_ETH_CRC_ERR or
+		 * FSF_AZ_RX_EV_PAUSE_FRM_ERR is set.
+ */
+ (*flagsp) |= EFX_ADDR_MISMATCH;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
+ EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ (*flagsp) |= EFX_DISCARD;
+
+#if EFSYS_OPT_RX_SCATTER
+ /*
+		 * Look out for 'payload queue ran dry' errors and ignore them.
+ *
+ * Sadly for the header/data split cases, the descriptor
+ * pointer in this event refers to the header queue and
+ * therefore cannot be easily detected as duplicate.
+ * So we drop these and rely on the receive processing seeing
+ * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
+ * the partially received packet.
+ */
+ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
+ ignore = B_TRUE;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ EFSYS_PROBE(crc_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
+ EFSYS_PROBE(pause_frm_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
+ EFSYS_PROBE(owner_id_err);
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ EFSYS_PROBE(ipv4_err);
+ (*flagsp) &= ~EFX_CKSUM_IPV4;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ EFSYS_PROBE(udp_chk_err);
+ (*flagsp) &= ~EFX_CKSUM_TCPUDP;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
+
+ /*
+		 * If IP is fragmented, FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
+ * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
+ * condition.
+ */
+ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
+ }
+
+ return (ignore);
+}
+
+static __checkReturn boolean_t
+siena_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t size;
+ uint32_t label;
+ boolean_t ok;
+#if EFSYS_OPT_RX_SCATTER
+ boolean_t sop;
+ boolean_t jumbo_cont;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ uint32_t hdr_type;
+ boolean_t is_v6;
+ uint16_t flags;
+ boolean_t ignore;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Basic packet information */
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
+ size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
+ ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
+
+#if EFSYS_OPT_RX_SCATTER
+ sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
+ jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
+
+ is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
+
+ /*
+ * If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP or other IP, then we can rely on the hardware checksums.
+ */
+ switch (hdr_type) {
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ flags = EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ flags = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ flags = 0;
+ break;
+ }
+
+#if EFSYS_OPT_RX_SCATTER
+ /* Report scatter and header/lookahead split buffer flags */
+ if (sop)
+ flags |= EFX_PKT_START;
+ if (jumbo_cont)
+ flags |= EFX_PKT_CONT;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
+ if (!ok) {
+ ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ if (ignore) {
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ return (B_FALSE);
+ }
+ }
+
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ /* Detect multicast packets that didn't match the filter */
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
+ } else {
+ EFSYS_PROBE(mcast_mismatch);
+ flags |= EFX_ADDR_MISMATCH;
+ }
+ } else {
+ flags |= EFX_PKT_UNICAST;
+ }
+
+ /*
+ * The packet parser in Siena can abort parsing packets under
+ * certain error conditions, setting the PKT_NOT_PARSED bit
+ * (which clears PKT_OK). If this is set, then don't trust
+ * the PKT_TYPE field.
+ */
+ if (!ok) {
+ uint32_t parse_err;
+
+ parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
+ if (parse_err != 0)
+ flags |= EFX_CHECK_VLAN;
+ }
+
+ if (~flags & EFX_CHECK_VLAN) {
+ uint32_t pkt_type;
+
+ pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
+ if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_global(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ _NOTE(ARGUNUSED(eqp, eecp, arg))
+
+ EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
+
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
+ uint32_t txq_index;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+
+ break;
+ }
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
+ uint32_t rxq_index;
+ uint32_t failed;
+
+ rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
+
+ if (failed) {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
+
+ EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_failed(arg,
+ rxq_index);
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ }
+
+ break;
+ }
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+
+ break;
+
+ case FSE_AZ_EVQ_NOT_EN_EV:
+ EFSYS_PROBE(evq_not_en);
+ break;
+
+ case FSE_AZ_SRM_UPD_DONE_EV: {
+ uint32_t code;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
+
+ code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_sram != NULL);
+ should_abort = eecp->eec_sram(arg, code);
+
+ break;
+ }
+ case FSE_AZ_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+
+ break;
+ }
+ case FSE_AZ_TX_PKT_NON_TCP_UDP:
+ EFSYS_PROBE(tx_pkt_non_tcp_udp);
+ break;
+
+ case FSE_AZ_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+
+ break;
+ }
+ case FSE_AZ_RX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
+
+ EFSYS_PROBE(rx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_RX_DSC_ERROR, 0);
+
+ break;
+
+ case FSE_AZ_TX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
+
+ EFSYS_PROBE(tx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_TX_DSC_ERROR, 0);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+
+ data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+#if EFSYS_OPT_MCDI
+
+static __checkReturn boolean_t
+siena_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ goto out;
+
+ EFSYS_ASSERT(eecp->eec_link_change != NULL);
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+#if EFSYS_OPT_MON_STATS
+ EFSYS_ASSERT(eecp->eec_monitor != NULL);
+#endif
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ siena_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
+ should_abort = eecp->eec_monitor(arg, id, value);
+ else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+#else
+ should_abort = B_FALSE;
+#endif
+ break;
+ }
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, code);
+ break;
+ }
+
+out:
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
+
+ EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+
+ return (0);
+}
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t ev;
+ efx_oword_t oword;
+
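+	/* Build a DRV_GEN event and post it via the driver event register */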
+ EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_EV_DATA_DW0, (uint32_t)data);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
+ EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
+ EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
+
+ EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int locked;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail2;
+
+ EFSYS_ASSERT(ticks > 0);
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL, ticks - 1);
+ }
+
+ locked = (eep->ee_index == 0) ? 1 : 0;
+
+ EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
+ eep->ee_index, &dword, locked);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size;
+ efx_oword_t oword;
+ efx_rc_t rc;
+ boolean_t notify_mode;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_intr.ei_type == EFX_INTR_LINE &&
+ index >= EFX_MAXRSS_LEGACY) {
+ rc = EINVAL;
+ goto fail3;
+ }
+#endif
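+	/* Find the EVQ_SIZE encoding such that n == EFX_EVQ_MINNEVS << size */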
+ for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = siena_ev_rx;
+ eep->ee_tx = siena_ev_tx;
+ eep->ee_driver = siena_ev_driver;
+ eep->ee_global = siena_ev_global;
+ eep->ee_drv_gen = siena_ev_drv_gen;
+#if EFSYS_OPT_MCDI
+ eep->ee_mcdi = siena_ev_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /* Set up the new event queue */
+ EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
+ FRF_AZ_EVQ_BUF_BASE_ID, id);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
+
+ /* Set initial interrupt moderation */
+ siena_ev_qmoderate(eep, us);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+#if EFSYS_OPT_RX_SCALE
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
+static const char * const __efx_ev_qstat_name[] = {
+ "all",
+ "rx",
+ "rx_ok",
+ "rx_frm_trunc",
+ "rx_tobe_disc",
+ "rx_pause_frm_err",
+ "rx_buf_owner_id_err",
+ "rx_ipv4_hdr_chksum_err",
+ "rx_tcp_udp_chksum_err",
+ "rx_eth_crc_err",
+ "rx_ip_frag_err",
+ "rx_mcast_pkt",
+ "rx_mcast_hash_match",
+ "rx_tcp_ipv4",
+ "rx_tcp_ipv6",
+ "rx_udp_ipv4",
+ "rx_udp_ipv6",
+ "rx_other_ipv4",
+ "rx_other_ipv6",
+ "rx_non_ip",
+ "rx_batch",
+ "tx",
+ "tx_wq_ff_full",
+ "tx_pkt_err",
+ "tx_pkt_too_big",
+ "tx_unexpected",
+ "global",
+ "global_mnt",
+ "driver",
+ "driver_srm_upd_done",
+ "driver_tx_descq_fls_done",
+ "driver_rx_descq_fls_done",
+ "driver_rx_descq_fls_failed",
+ "driver_rx_dsc_error",
+ "driver_tx_dsc_error",
+ "drv_gen",
+ "mcdi_response",
+};
+/* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
+
+ const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, EV_NQSTATS);
+
+ return (__efx_ev_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_oword_t oword;
+
+ /* Purge event queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
+ eep->ee_index, &oword, B_TRUE);
+
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
+}
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_filter.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_filter.c
new file mode 100644
index 00000000..ba310260
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_filter.c
@@ -0,0 +1,1424 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_filter_ops_t __efx_filter_siena_ops = {
+ siena_filter_init, /* efo_init */
+ siena_filter_fini, /* efo_fini */
+ siena_filter_restore, /* efo_restore */
+ siena_filter_add, /* efo_add */
+ siena_filter_delete, /* efo_delete */
+ siena_filter_supported_filters, /* efo_supported_filters */
+ NULL, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_filter_ops_t __efx_filter_ef10_ops = {
+ ef10_filter_init, /* efo_init */
+ ef10_filter_fini, /* efo_fini */
+ ef10_filter_restore, /* efo_restore */
+ ef10_filter_add, /* efo_add */
+ ef10_filter_delete, /* efo_delete */
+ ef10_filter_supported_filters, /* efo_supported_filters */
+ ef10_filter_reconfigure, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ return (efop->efo_add(enp, spec, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ return (efop->efo_delete(enp, spec));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if ((rc = enp->en_efop->efo_restore(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp)
+{
+ const efx_filter_ops_t *efop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ efop = &__efx_filter_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efop->efo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_efop = efop;
+ enp->en_mod_flags |= EFX_MOD_FILTER;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+ return (rc);
+}
+
+ void
+efx_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ enp->en_efop->efo_fini(enp);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+}
+
+/*
+ * Query the possible combinations of match flags which can be filtered on.
+ * These are returned as a list, of which each 32 bit element is a bitmask
+ * formed of EFX_FILTER_MATCH flags.
+ *
+ * The combinations are ordered in priority from highest to lowest.
+ *
+ * If the provided buffer is too short to hold the list, the call will fail
+ * with ENOSPC and *list_lengthp will be set to the required buffer length.
+ */
+ __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL);
+
+ if (buffer == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = enp->en_efop->efo_supported_filters(enp, buffer, buffer_length,
+ list_lengthp);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if (enp->en_efop->efo_reconfigure != NULL) {
+ if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr,
+ all_unicst, mulcst,
+ all_mulcst, brdcst,
+ addrs, count)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
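+/*
+ * Filter specification helpers.  Typical usage (illustrative sketch only):
+ * initialise the spec with efx_filter_spec_init_rx() or
+ * efx_filter_spec_init_tx(), narrow the match with one or more of the
+ * efx_filter_spec_set_*() helpers below, then pass the spec to
+ * efx_filter_insert().
+ */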
+ void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(erp, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = priority;
+ spec->efs_flags = EFX_FILTER_FLAG_RX | flags;
+ spec->efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+ spec->efs_dmaq_id = (uint16_t)erp->er_index;
+}
+
+ void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(etp, !=, NULL);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = EFX_FILTER_PRI_REQUIRED;
+ spec->efs_flags = EFX_FILTER_FLAG_TX;
+ spec->efs_dmaq_id = (uint16_t)etp->et_index;
+}
+
+
+/*
+ * Specify IPv4 host, transport protocol and port in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = host;
+ spec->efs_loc_port = port;
+ return (0);
+}
+
+/*
+ * Specify IPv4 hosts, transport protocol and ports in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = lhost;
+ spec->efs_loc_port = lport;
+ spec->efs_rem_host.eo_u32[0] = rhost;
+ spec->efs_rem_port = rport;
+ return (0);
+}
+
+/*
+ * Specify local Ethernet address and/or VID in filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(addr, !=, NULL);
+
+ if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL)
+ return (EINVAL);
+
+ if (vid != EFX_FILTER_SPEC_VID_UNSPEC) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->efs_outer_vid = vid;
+ }
+ if (addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN);
+ }
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched unicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched multicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ return (0);
+}
+
+
+
+#if EFSYS_OPT_SIENA
+
+/*
+ * "Fudge factors" - difference between programmed value and actual depth.
+ * Due to the pipelined implementation we need to program the H/W with a
+ * value that is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/*
+ * Hard maximum hop limit. Hardware will time out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+static __checkReturn efx_rc_t
+siena_filter_spec_from_gen_spec(
+ __out siena_filter_spec_t *sf_spec,
+ __in efx_filter_spec_t *gen_spec)
+{
+ efx_rc_t rc;
+ boolean_t is_full = B_FALSE;
+
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX)
+ EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX);
+ else
+ EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+	/* Falcon/Siena hardware only has one RSS context */
+ if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->efs_rss_context != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sf_spec->sfs_flags = gen_spec->efs_flags;
+ sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id;
+
+ switch (gen_spec->efs_match_flags) {
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: {
+ uint32_t rhost, host1, host2;
+ uint16_t rport, port1, port2;
+
+ if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if (gen_spec->efs_loc_port == 0 ||
+ (is_full && gen_spec->efs_rem_port == 0)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ switch (gen_spec->efs_ip_proto) {
+ case EFX_IPPROTO_TCP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_TCP_FULL :
+ EFX_SIENA_FILTER_TX_TCP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_TCP_FULL :
+ EFX_SIENA_FILTER_RX_TCP_WILD);
+ }
+ break;
+ case EFX_IPPROTO_UDP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_UDP_FULL :
+ EFX_SIENA_FILTER_TX_UDP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_UDP_FULL :
+ EFX_SIENA_FILTER_RX_UDP_WILD);
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /*
+ * The filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * addresses (zero for a wildcard).
+ */
+ rhost = is_full ? gen_spec->efs_rem_host.eo_u32[0] : 0;
+ rport = is_full ? gen_spec->efs_rem_port : 0;
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ host1 = gen_spec->efs_loc_host.eo_u32[0];
+ host2 = rhost;
+ } else {
+ host1 = rhost;
+ host2 = gen_spec->efs_loc_host.eo_u32[0];
+ }
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_TX_UDP_WILD) {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ } else {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ }
+ } else {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_RX_UDP_WILD) {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ }
+ }
+ sf_spec->sfs_dword[0] = (host1 << 16) | port1;
+ sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16);
+ sf_spec->sfs_dword[2] = host2;
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_MAC_FULL :
+ EFX_SIENA_FILTER_TX_MAC_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_MAC_FULL :
+ EFX_SIENA_FILTER_RX_MAC_WILD);
+ }
+ sf_spec->sfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0;
+ sf_spec->sfs_dword[1] =
+ gen_spec->efs_loc_mac[2] << 24 |
+ gen_spec->efs_loc_mac[3] << 16 |
+ gen_spec->efs_loc_mac[4] << 8 |
+ gen_spec->efs_loc_mac[5];
+ sf_spec->sfs_dword[2] =
+ gen_spec->efs_loc_mac[0] << 8 |
+ gen_spec->efs_loc_mac[1];
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple.
+ */
+static uint16_t
+siena_filter_tbl_hash(
+ __in uint32_t key)
+{
+ uint16_t tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ (uint16_t)(key >> 16);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ return (tmp);
+}
+
+/*
+ * To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash.
+ */
+static uint16_t
+siena_filter_tbl_increment(
+ __in uint32_t key)
+{
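+	/*
+	 * The increment is always odd and therefore coprime with the
+	 * power-of-two table size, so the probe sequence can visit every
+	 * slot before repeating.
+	 */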
+ return ((uint16_t)(key * 2 - 1));
+}
+
+static __checkReturn boolean_t
+siena_filter_test_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0);
+}
+
+static void
+siena_filter_set_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] |= (1 << (index % 32));
+ ++sftp->sft_used;
+}
+
+static void
+siena_filter_clear_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32));
+
+ --sftp->sft_used;
+ EFSYS_ASSERT3U(sftp->sft_used, >=, 0);
+}
+
+
+static siena_filter_tbl_id_t
+siena_filter_tbl_id(
+ __in siena_filter_type_t type)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC;
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ tbl_id = EFX_SIENA_FILTER_NTBLS;
+ break;
+ }
+ return (tbl_id);
+}
+
+static void
+siena_filter_reset_search_depth(
+ __inout siena_filter_t *sfp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ switch (tbl_id) {
+ case EFX_SIENA_FILTER_TBL_RX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+static void
+siena_filter_push_rx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_filter_push_tx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static __checkReturn uint32_t
+siena_filter_build(
+ __out efx_oword_t *filter,
+ __in siena_filter_spec_t *spec)
+{
+ uint32_t dword3;
+ uint32_t key;
+ uint8_t type = spec->sfs_type;
+ uint32_t flags = spec->sfs_flags;
+
+ switch (siena_filter_tbl_id(type)) {
+ case EFX_SIENA_FILTER_TBL_RX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_RX_UDP_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_BZ_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_BZ_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_AZ_TCP_UDP, is_udp,
+ FRF_AZ_RXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_CZ_RMFT_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_CZ_RMFT_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_TX_UDP_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TIFT_TCP_UDP, is_udp,
+ FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (0);
+ }
+
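+	/*
+	 * Mix dword3 into the key so that otherwise-identical entries of
+	 * different sub-types (and, on the TX side, different queues)
+	 * produce different keys.
+	 */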
+ key =
+ spec->sfs_dword[0] ^
+ spec->sfs_dword[1] ^
+ spec->sfs_dword[2] ^
+ dword3;
+
+ return (key);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_push_entry(
+ __inout efx_nic_t *enp,
+ __in siena_filter_type_t type,
+ __in int index,
+ __in efx_oword_t *eop)
+{
+ efx_rc_t rc;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ return (0);
+
+fail1:
+ return (rc);
+}
+
+
+static __checkReturn boolean_t
+siena_filter_equal(
+ __in const siena_filter_spec_t *left,
+ __in const siena_filter_spec_t *right)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ tbl_id = siena_filter_tbl_id(left->sfs_type);
+
+
+ if (left->sfs_type != right->sfs_type)
+ return (B_FALSE);
+
+ if (memcmp(left->sfs_dword, right->sfs_dword,
+ sizeof (left->sfs_dword)))
+ return (B_FALSE);
+
+ if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) &&
+ left->sfs_dmaq_id != right->sfs_dmaq_id)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_search(
+ __in siena_filter_tbl_t *sftp,
+ __in siena_filter_spec_t *spec,
+ __in uint32_t key,
+ __in boolean_t for_insert,
+ __out int *filter_index,
+ __out unsigned int *depth_required)
+{
+ unsigned int hash, incr, filter_idx, depth;
+
+ hash = siena_filter_tbl_hash(key);
+ incr = siena_filter_tbl_increment(key);
+
+ filter_idx = hash & (sftp->sft_size - 1);
+ depth = 1;
+
+ for (;;) {
+ /*
+ * Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (siena_filter_test_used(sftp, filter_idx) ?
+ siena_filter_equal(spec,
+ &sftp->sft_spec[filter_idx]) :
+ for_insert) {
+ *filter_index = filter_idx;
+ *depth_required = depth;
+ return (0);
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == FILTER_CTL_SRCH_MAX)
+ return (for_insert ? EBUSY : ENOENT);
+
+ filter_idx = (filter_idx + incr) & (sftp->sft_size - 1);
+ ++depth;
+ }
+}
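+
+/*
+ * Illustrative note: the search above is an open-addressing probe with a
+ * key-derived stride.  The hash selects the starting bucket, the increment
+ * is added (modulo the power-of-two table size) on each miss, and the walk
+ * gives up after FILTER_CTL_SRCH_MAX probes, returning EBUSY for an insert
+ * or ENOENT for a lookup.
+ */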
+
+static void
+siena_filter_clear_entry(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_t *sftp,
+ __in int index)
+{
+ efx_oword_t filter;
+
+ if (siena_filter_test_used(sftp, index)) {
+ siena_filter_clear_used(sftp, index);
+
+ EFX_ZERO_OWORD(filter);
+ siena_filter_push_entry(enp,
+ sftp->sft_spec[index].sfs_type,
+ index, &filter);
+
+ memset(&sftp->sft_spec[index],
+ 0, sizeof (sftp->sft_spec[0]));
+ }
+}
+
+ void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ int index;
+ efsys_lock_state_t state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (index = 0; index < sftp->sft_size; ++index) {
+ siena_filter_clear_entry(enp, sftp, index);
+ }
+
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp;
+ siena_filter_tbl_t *sftp;
+ int tbl_id;
+ efx_rc_t rc;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp);
+
+ if (!sfp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_siena_filter = sfp;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_SIENA:
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP];
+ sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC];
+ sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP];
+ sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC];
+ sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ break;
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ unsigned int bitmap_size;
+
+ sftp = &sfp->sf_tbl[tbl_id];
+ if (sftp->sft_size == 0)
+ continue;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap);
+ if (!sftp->sft_bitmap) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ sftp->sft_size * sizeof (*sftp->sft_spec),
+ sftp->sft_spec);
+ if (!sftp->sft_spec) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memset(sftp->sft_spec, 0,
+ sftp->sft_size * sizeof (*sftp->sft_spec));
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ siena_filter_fini(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (sfp == NULL)
+ return;
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ unsigned int bitmap_size;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ if (sftp->sft_bitmap != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, bitmap_size,
+ sftp->sft_bitmap);
+ sftp->sft_bitmap = NULL;
+ }
+
+ if (sftp->sft_spec != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size *
+ sizeof (*sftp->sft_spec), sftp->sft_spec);
+ sftp->sft_spec = NULL;
+ }
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t),
+ enp->en_filter.ef_siena_filter);
+}
+
+/* Restore filter state after a reset */
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *spec;
+ efx_oword_t filter;
+ int filter_idx;
+ efsys_lock_state_t state;
+ uint32_t key;
+ efx_rc_t rc;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ sftp = &sfp->sf_tbl[tbl_id];
+ for (filter_idx = 0;
+ filter_idx < sftp->sft_size;
+ filter_idx++) {
+ if (!siena_filter_test_used(sftp, filter_idx))
+ continue;
+
+ spec = &sftp->sft_spec[filter_idx];
+ if ((key = siena_filter_build(&filter, spec)) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((rc = siena_filter_push_entry(enp,
+ spec->sfs_type, filter_idx, &filter)) != 0)
+ goto fail2;
+ }
+ }
+
+ siena_filter_push_rx_limits(enp);
+ siena_filter_push_tx_limits(enp);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *saved_sf_spec;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ if (sftp->sft_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail3;
+
+ EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size);
+ saved_sf_spec = &sftp->sft_spec[filter_idx];
+
+ if (siena_filter_test_used(sftp, filter_idx)) {
+ if (may_replace == B_FALSE) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ siena_filter_set_used(sftp, filter_idx);
+ *saved_sf_spec = sf_spec;
+
+ if (sfp->sf_depth[sf_spec.sfs_type] < depth) {
+ sfp->sf_depth[sf_spec.sfs_type] = depth;
+ if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC)
+ siena_filter_push_tx_limits(enp);
+ else
+ siena_filter_push_rx_limits(enp);
+ }
+
+ siena_filter_push_entry(enp, sf_spec.sfs_type,
+ filter_idx, &filter);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail2;
+
+ siena_filter_clear_entry(enp, sftp, filter_idx);
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail2:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#define SIENA_MAX_SUPPORTED_MATCHES 4
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ uint32_t index = 0;
+ uint32_t rx_matches[SIENA_MAX_SUPPORTED_MATCHES];
+ size_t list_length;
+ efx_rc_t rc;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+
+ if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) {
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC;
+
+ rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC;
+ }
+
+ EFSYS_ASSERT3U(index, <=, SIENA_MAX_SUPPORTED_MATCHES);
+ list_length = index;
+
+ *list_lengthp = list_length;
+
+ if (buffer_length < list_length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(buffer, rx_matches, list_length * sizeof (rx_matches[0]));
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#undef SIENA_MAX_SUPPORTED_MATCHES
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_hash.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_hash.c
new file mode 100644
index 00000000..3cc0d200
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_hash.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2006 Bob Jenkins
+ *
+ * Derived from public domain source, see
+ * <http://burtleburtle.net/bob/c/lookup3.c>:
+ *
+ * "lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup...
+ * ...You can use this free for any purpose. It's in the public domain.
+ * It has no warranty."
+ *
+ * Copyright (c) 2014-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/* Hash initial value */
+#define EFX_HASH_INITIAL_VALUE 0xdeadbeef
+
+/*
+ * Rotate a 32-bit value left
+ *
+ * Allow platform to provide an intrinsic or optimised routine and
+ * fall-back to a simple shift based implementation.
+ */
+#if EFSYS_HAS_ROTL_DWORD
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ EFSYS_ROTL_DWORD(_value, _shift)
+
+#else
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ (((_value) << (_shift)) | ((_value) >> (32 - (_shift))))
+
+#endif
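+
+/*
+ * Example (illustrative): with the fall-back implementation,
+ * EFX_HASH_ROTATE(0x80000001, 1) yields 0x00000003 -- the bit shifted out
+ * at the top wraps around into bit 0.
+ */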
+
+/* Mix three 32-bit values reversibly */
+#define EFX_HASH_MIX(_a, _b, _c) \
+ do { \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 4); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 6); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 8); \
+ _b += _a; \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 16); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 19); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 4); \
+ _b += _a; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Final mixing of three 32-bit values into one (_c) */
+#define EFX_HASH_FINALISE(_a, _b, _c) \
+ do { \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 14); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 11); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 25); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 16); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 4); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 14); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 24); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+/* Produce a 32-bit hash from 32-bit aligned input */
+ __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE +
+ (((uint32_t)count) * sizeof (uint32_t)) + init;
+
+ /* Handle all but the last three dwords of the input */
+ while (count > 3) {
+ a += input[0];
+ b += input[1];
+ c += input[2];
+ EFX_HASH_MIX(a, b, c);
+
+ count -= 3;
+ input += 3;
+ }
+
+ /* Handle the left-overs */
+ switch (count) {
+ case 3:
+ c += input[2];
+ /* Fall-through */
+ case 2:
+ b += input[1];
+ /* Fall-through */
+ case 1:
+ a += input[0];
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if count parameter was zero */
+ break;
+ }
+
+ return (c);
+}
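+
+/*
+ * Usage sketch (illustrative only; key[] and table_size are placeholders
+ * for the caller's match fields and a power-of-two table size):
+ *
+ *	uint32_t key[3];
+ *	uint32_t hash = efx_hash_dwords(key, 3, 0);
+ *	unsigned int bucket = hash & (table_size - 1);
+ */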
+
+#if EFSYS_IS_BIG_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]) << 24;
+ a += ((uint32_t)input[1]) << 16;
+ a += ((uint32_t)input[2]) << 8;
+ a += ((uint32_t)input[3]);
+ b += ((uint32_t)input[4]) << 24;
+ b += ((uint32_t)input[5]) << 16;
+ b += ((uint32_t)input[6]) << 8;
+ b += ((uint32_t)input[7]);
+ c += ((uint32_t)input[8]) << 24;
+ c += ((uint32_t)input[9]) << 16;
+ c += ((uint32_t)input[10]) << 8;
+ c += ((uint32_t)input[11]);
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]);
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 8;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 16;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]) << 24;
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]);
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 8;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 16;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]) << 24;
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]);
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 8;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 16;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]) << 24;
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]);
+ a += ((uint32_t)input[1]) << 8;
+ a += ((uint32_t)input[2]) << 16;
+ a += ((uint32_t)input[3]) << 24;
+ b += ((uint32_t)input[4]);
+ b += ((uint32_t)input[5]) << 8;
+ b += ((uint32_t)input[6]) << 16;
+ b += ((uint32_t)input[7]) << 24;
+ c += ((uint32_t)input[8]);
+ c += ((uint32_t)input[9]) << 8;
+ c += ((uint32_t)input[10]) << 16;
+ c += ((uint32_t)input[11]) << 24;
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]) << 24;
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 16;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 8;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]);
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]) << 24;
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 16;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 8;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]);
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]) << 24;
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 16;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 8;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]);
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
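+
+/*
+ * Note (illustrative, following the lookup3 design): each variant above
+ * assembles bytes in host byte order, so for 32-bit aligned input whose
+ * length is a multiple of four, efx_hash_bytes() is intended to produce
+ * the same value as efx_hash_dwords() over the same data.
+ */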
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_impl.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_impl.h
new file mode 100644
index 00000000..43add6d9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_impl.h
@@ -0,0 +1,1208 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_IMPL_H
+#define _SYS_EFX_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+/* FIXME: Add definition for driver generated software events */
+#ifndef ESE_DZ_EV_CODE_DRV_GEN_EV
+#define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV
+#endif
+
+
+#if EFSYS_OPT_SIENA
+#include "siena_impl.h"
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+#include "hunt_impl.h"
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+#include "medford_impl.h"
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#include "ef10_impl.h"
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_MOD_MCDI 0x00000001
+#define EFX_MOD_PROBE 0x00000002
+#define EFX_MOD_NVRAM 0x00000004
+#define EFX_MOD_VPD 0x00000008
+#define EFX_MOD_NIC 0x00000010
+#define EFX_MOD_INTR 0x00000020
+#define EFX_MOD_EV 0x00000040
+#define EFX_MOD_RX 0x00000080
+#define EFX_MOD_TX 0x00000100
+#define EFX_MOD_PORT 0x00000200
+#define EFX_MOD_MON 0x00000400
+#define EFX_MOD_FILTER 0x00001000
+#define EFX_MOD_LIC 0x00002000
+
+#define EFX_RESET_PHY 0x00000001
+#define EFX_RESET_RXQ_ERR 0x00000002
+#define EFX_RESET_TXQ_ERR 0x00000004
+
+typedef enum efx_mac_type_e {
+ EFX_MAC_INVALID = 0,
+ EFX_MAC_SIENA,
+ EFX_MAC_HUNTINGTON,
+ EFX_MAC_MEDFORD,
+ EFX_MAC_NTYPES
+} efx_mac_type_t;
+
+typedef struct efx_ev_ops_s {
+ efx_rc_t (*eevo_init)(efx_nic_t *);
+ void (*eevo_fini)(efx_nic_t *);
+ efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int,
+ efsys_mem_t *, size_t, uint32_t,
+ uint32_t, uint32_t, efx_evq_t *);
+ void (*eevo_qdestroy)(efx_evq_t *);
+ efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int);
+ void (*eevo_qpost)(efx_evq_t *, uint16_t);
+ efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int);
+#if EFSYS_OPT_QSTATS
+ void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *);
+#endif
+} efx_ev_ops_t;
+
+typedef struct efx_tx_ops_s {
+ efx_rc_t (*etxo_init)(efx_nic_t *);
+ void (*etxo_fini)(efx_nic_t *);
+ efx_rc_t (*etxo_qcreate)(efx_nic_t *,
+ unsigned int, unsigned int,
+ efsys_mem_t *, size_t,
+ uint32_t, uint16_t,
+ efx_evq_t *, efx_txq_t *,
+ unsigned int *);
+ void (*etxo_qdestroy)(efx_txq_t *);
+ efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int);
+ efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int);
+ efx_rc_t (*etxo_qflush)(efx_txq_t *);
+ void (*etxo_qenable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_enable)(efx_txq_t *);
+ void (*etxo_qpio_disable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_write)(efx_txq_t *, uint8_t *, size_t,
+ size_t);
+ efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int,
+ unsigned int *);
+ efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t,
+ size_t, boolean_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint8_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint16_t,
+ efx_desc_t *, int);
+ void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
+#if EFSYS_OPT_QSTATS
+ void (*etxo_qstats_update)(efx_txq_t *,
+ efsys_stat_t *);
+#endif
+} efx_tx_ops_t;
+
+typedef struct efx_rx_ops_s {
+ efx_rc_t (*erxo_init)(efx_nic_t *);
+ void (*erxo_fini)(efx_nic_t *);
+#if EFSYS_OPT_RX_SCATTER
+ efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int);
+#endif
+#if EFSYS_OPT_RX_SCALE
+ efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, efx_rx_hash_alg_t,
+ efx_rx_hash_type_t, boolean_t);
+ efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint8_t *, size_t);
+ efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, unsigned int *,
+ size_t);
+ uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t,
+ uint8_t *);
+#endif /* EFSYS_OPT_RX_SCALE */
+ efx_rc_t (*erxo_prefix_pktlen)(efx_nic_t *, uint8_t *,
+ uint16_t *);
+ void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t,
+ unsigned int, unsigned int,
+ unsigned int);
+ void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *);
+#if EFSYS_OPT_RX_PACKED_STREAM
+ void (*erxo_qps_update_credits)(efx_rxq_t *);
+ uint8_t * (*erxo_qps_packet_info)(efx_rxq_t *, uint8_t *,
+ uint32_t, uint32_t,
+ uint16_t *, uint32_t *, uint32_t *);
+#endif
+ efx_rc_t (*erxo_qflush)(efx_rxq_t *);
+ void (*erxo_qenable)(efx_rxq_t *);
+ efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
+ unsigned int, efx_rxq_type_t,
+ efsys_mem_t *, size_t, uint32_t,
+ efx_evq_t *, efx_rxq_t *);
+ void (*erxo_qdestroy)(efx_rxq_t *);
+} efx_rx_ops_t;
+
+typedef struct efx_mac_ops_s {
+ efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
+ efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *);
+ efx_rc_t (*emo_addr_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_get)(efx_nic_t *, size_t *);
+ efx_rc_t (*emo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*emo_multicast_list_set)(efx_nic_t *);
+ efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *,
+ efx_rxq_t *, boolean_t);
+ void (*emo_filter_default_rxq_clear)(efx_nic_t *);
+#if EFSYS_OPT_LOOPBACK
+ efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t,
+ efx_loopback_type_t);
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ efx_rc_t (*emo_stats_get_mask)(efx_nic_t *, uint32_t *, size_t);
+ efx_rc_t (*emo_stats_clear)(efx_nic_t *);
+ efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *);
+ efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *,
+ uint16_t, boolean_t);
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efsys_stat_t *, uint32_t *);
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_mac_ops_t;
+
+typedef struct efx_phy_ops_s {
+ efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */
+ efx_rc_t (*epo_reset)(efx_nic_t *);
+ efx_rc_t (*epo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*epo_verify)(efx_nic_t *);
+ efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *);
+#if EFSYS_OPT_PHY_STATS
+ efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ uint32_t *);
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *);
+ efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t);
+ efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t,
+ efx_bist_result_t *, uint32_t *,
+ unsigned long *, size_t);
+ void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t);
+#endif /* EFSYS_OPT_BIST */
+} efx_phy_ops_t;
+
+#if EFSYS_OPT_FILTER
+typedef struct efx_filter_ops_s {
+ efx_rc_t (*efo_init)(efx_nic_t *);
+ void (*efo_fini)(efx_nic_t *);
+ efx_rc_t (*efo_restore)(efx_nic_t *);
+ efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *,
+ boolean_t may_replace);
+ efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *);
+ efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *,
+ size_t, size_t *);
+ efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t,
+ boolean_t, boolean_t, boolean_t,
+ uint8_t const *, uint32_t);
+} efx_filter_ops_t;
+
+extern __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+#endif /* EFSYS_OPT_FILTER */
+
+
+typedef struct efx_port_s {
+ efx_mac_type_t ep_mac_type;
+ uint32_t ep_phy_type;
+ uint8_t ep_port;
+ uint32_t ep_mac_pdu;
+ uint8_t ep_mac_addr[6];
+ efx_link_mode_t ep_link_mode;
+ boolean_t ep_all_unicst;
+ boolean_t ep_mulcst;
+ boolean_t ep_all_mulcst;
+ boolean_t ep_brdcst;
+ unsigned int ep_fcntl;
+ boolean_t ep_fcntl_autoneg;
+ efx_oword_t ep_multicst_hash[2];
+ uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN *
+ EFX_MAC_MULTICAST_LIST_MAX];
+ uint32_t ep_mulcst_addr_count;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t ep_loopback_type;
+ efx_link_mode_t ep_loopback_link_mode;
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t ep_phy_flags;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ efx_phy_led_mode_t ep_phy_led_mode;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+ efx_phy_media_type_t ep_fixed_port_type;
+ efx_phy_media_type_t ep_module_type;
+ uint32_t ep_adv_cap_mask;
+ uint32_t ep_lp_cap_mask;
+ uint32_t ep_default_adv_cap_mask;
+ uint32_t ep_phy_cap_mask;
+ boolean_t ep_mac_drain;
+ boolean_t ep_mac_stats_pending;
+#if EFSYS_OPT_BIST
+ efx_bist_type_t ep_current_bist;
+#endif
+ const efx_mac_ops_t *ep_emop;
+ const efx_phy_ops_t *ep_epop;
+} efx_port_t;
+
+typedef struct efx_mon_ops_s {
+#if EFSYS_OPT_MON_STATS
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efx_mon_stat_value_t *);
+#endif /* EFSYS_OPT_MON_STATS */
+} efx_mon_ops_t;
+
+typedef struct efx_mon_s {
+ efx_mon_type_t em_type;
+ const efx_mon_ops_t *em_emop;
+} efx_mon_t;
+
+typedef struct efx_intr_ops_s {
+ efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *);
+ void (*eio_enable)(efx_nic_t *);
+ void (*eio_disable)(efx_nic_t *);
+ void (*eio_disable_unlocked)(efx_nic_t *);
+ efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int);
+ void (*eio_status_line)(efx_nic_t *, boolean_t *, uint32_t *);
+ void (*eio_status_message)(efx_nic_t *, unsigned int,
+ boolean_t *);
+ void (*eio_fatal)(efx_nic_t *);
+ void (*eio_fini)(efx_nic_t *);
+} efx_intr_ops_t;
+
+typedef struct efx_intr_s {
+ const efx_intr_ops_t *ei_eiop;
+ efsys_mem_t *ei_esmp;
+ efx_intr_type_t ei_type;
+ unsigned int ei_level;
+} efx_intr_t;
+
+typedef struct efx_nic_ops_s {
+ efx_rc_t (*eno_probe)(efx_nic_t *);
+ efx_rc_t (*eno_board_cfg)(efx_nic_t *);
+ efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*);
+ efx_rc_t (*eno_reset)(efx_nic_t *);
+ efx_rc_t (*eno_init)(efx_nic_t *);
+ efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *);
+ efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t,
+ uint32_t *, size_t *);
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*eno_register_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ void (*eno_fini)(efx_nic_t *);
+ void (*eno_unprobe)(efx_nic_t *);
+} efx_nic_ops_t;
+
+#ifndef EFX_TXQ_LIMIT_TARGET
+#define EFX_TXQ_LIMIT_TARGET 259
+#endif
+#ifndef EFX_RXQ_LIMIT_TARGET
+#define EFX_RXQ_LIMIT_TARGET 512
+#endif
+#ifndef EFX_TXQ_DC_SIZE
+#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */
+#endif
+#ifndef EFX_RXQ_DC_SIZE
+#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */
+#endif
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+typedef struct siena_filter_spec_s {
+ uint8_t sfs_type;
+ uint32_t sfs_flags;
+ uint32_t sfs_dmaq_id;
+ uint32_t sfs_dword[3];
+} siena_filter_spec_t;
+
+typedef enum siena_filter_type_e {
+ EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */
+ EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, -, -} */
+ EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
+ EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
+
+ EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
+	EFX_SIENA_FILTER_TX_UDP_FULL,	/* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */
+ EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */
+
+ EFX_SIENA_FILTER_NTYPES
+} siena_filter_type_t;
+
+typedef enum siena_filter_tbl_id_e {
+ EFX_SIENA_FILTER_TBL_RX_IP = 0,
+ EFX_SIENA_FILTER_TBL_RX_MAC,
+ EFX_SIENA_FILTER_TBL_TX_IP,
+ EFX_SIENA_FILTER_TBL_TX_MAC,
+ EFX_SIENA_FILTER_NTBLS
+} siena_filter_tbl_id_t;
+
+typedef struct siena_filter_tbl_s {
+ int sft_size; /* number of entries */
+ int sft_used; /* active count */
+ uint32_t *sft_bitmap; /* active bitmap */
+ siena_filter_spec_t *sft_spec; /* array of saved specs */
+} siena_filter_tbl_t;
+
+typedef struct siena_filter_s {
+ siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS];
+ unsigned int sf_depth[EFX_SIENA_FILTER_NTYPES];
+} siena_filter_t;
+
+#endif /* EFSYS_OPT_SIENA */
+
+typedef struct efx_filter_s {
+#if EFSYS_OPT_SIENA
+ siena_filter_t *ef_siena_filter;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ ef10_filter_table_t *ef_ef10_filter_table;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+} efx_filter_t;
+
+#if EFSYS_OPT_SIENA
+
+extern void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
+
+#if EFSYS_OPT_MCDI
+
+typedef struct efx_mcdi_ops_s {
+ efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *);
+ void (*emco_send_request)(efx_nic_t *, void *, size_t,
+ void *, size_t);
+ efx_rc_t (*emco_poll_reboot)(efx_nic_t *);
+ boolean_t (*emco_poll_response)(efx_nic_t *);
+ void (*emco_read_response)(efx_nic_t *, void *, size_t, size_t);
+ void (*emco_fini)(efx_nic_t *);
+ efx_rc_t (*emco_feature_supported)(efx_nic_t *,
+ efx_mcdi_feature_id_t, boolean_t *);
+ void (*emco_get_timeout)(efx_nic_t *, efx_mcdi_req_t *,
+ uint32_t *);
+} efx_mcdi_ops_t;
+
+typedef struct efx_mcdi_s {
+ const efx_mcdi_ops_t *em_emcop;
+ const efx_mcdi_transport_t *em_emtp;
+ efx_mcdi_iface_t em_emip;
+} efx_mcdi_t;
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM
+typedef struct efx_nvram_ops_s {
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*envo_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ efx_rc_t (*envo_type_to_partn)(efx_nic_t *, efx_nvram_type_t,
+ uint32_t *);
+ efx_rc_t (*envo_partn_size)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t,
+ unsigned int, size_t);
+ efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t);
+ efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t,
+ uint32_t *, uint16_t *);
+ efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
+ uint16_t *);
+ efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t,
+ caddr_t, size_t);
+} efx_nvram_ops_t;
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+typedef struct efx_vpd_ops_s {
+ efx_rc_t (*evpdo_init)(efx_nic_t *);
+ efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *);
+ efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_verify)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *, unsigned int *);
+ efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t);
+ void (*evpdo_fini)(efx_nic_t *);
+} efx_vpd_ops_t;
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *resultp);
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_LICENSING
+
+typedef struct efx_lic_ops_s {
+ efx_rc_t (*elo_update_licenses)(efx_nic_t *);
+ efx_rc_t (*elo_get_key_stats)(efx_nic_t *, efx_key_stats_t *);
+ efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *);
+ efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *,
+ size_t *, uint8_t *);
+ efx_rc_t (*elo_find_start)
+ (efx_nic_t *, caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *);
+ boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *, uint32_t *);
+ boolean_t (*elo_validate_key)(efx_nic_t *,
+ caddr_t, uint32_t);
+ efx_rc_t (*elo_read_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t, uint32_t,
+ caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_write_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ caddr_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_delete_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ uint32_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_create_partition)(efx_nic_t *,
+ caddr_t, size_t);
+ efx_rc_t (*elo_finish_partition)(efx_nic_t *,
+ caddr_t, size_t);
+} efx_lic_ops_t;
+
+#endif
+
+typedef struct efx_drv_cfg_s {
+ uint32_t edc_min_vi_count;
+ uint32_t edc_max_vi_count;
+
+ uint32_t edc_max_piobuf_count;
+ uint32_t edc_pio_alloc_size;
+} efx_drv_cfg_t;
+
+struct efx_nic_s {
+ uint32_t en_magic;
+ efx_family_t en_family;
+ uint32_t en_features;
+ efsys_identifier_t *en_esip;
+ efsys_lock_t *en_eslp;
+ efsys_bar_t *en_esbp;
+ unsigned int en_mod_flags;
+ unsigned int en_reset_flags;
+ efx_nic_cfg_t en_nic_cfg;
+ efx_drv_cfg_t en_drv_cfg;
+ efx_port_t en_port;
+ efx_mon_t en_mon;
+ efx_intr_t en_intr;
+ uint32_t en_ev_qcount;
+ uint32_t en_rx_qcount;
+ uint32_t en_tx_qcount;
+ const efx_nic_ops_t *en_enop;
+ const efx_ev_ops_t *en_eevop;
+ const efx_tx_ops_t *en_etxop;
+ const efx_rx_ops_t *en_erxop;
+#if EFSYS_OPT_FILTER
+ efx_filter_t en_filter;
+ const efx_filter_ops_t *en_efop;
+#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_MCDI
+ efx_mcdi_t en_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_NVRAM
+ efx_nvram_type_t en_nvram_locked;
+ const efx_nvram_ops_t *en_envop;
+#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_VPD
+ const efx_vpd_ops_t *en_evpdop;
+#endif /* EFSYS_OPT_VPD */
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_hash_support_t en_hash_support;
+ efx_rx_scale_support_t en_rss_support;
+ uint32_t en_rss_context;
+#endif /* EFSYS_OPT_RX_SCALE */
+ uint32_t en_vport_id;
+#if EFSYS_OPT_LICENSING
+ const efx_lic_ops_t *en_elop;
+ boolean_t en_licensing_supported;
+#endif
+ union {
+#if EFSYS_OPT_SIENA
+ struct {
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+ unsigned int enu_partn_mask;
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+#if EFSYS_OPT_VPD
+ caddr_t enu_svpd;
+ size_t enu_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ int enu_unused;
+ } siena;
+#endif /* EFSYS_OPT_SIENA */
+ int enu_unused;
+ } en_u;
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+ union en_arch {
+ struct {
+ int ena_vi_base;
+ int ena_vi_count;
+ int ena_vi_shift;
+#if EFSYS_OPT_VPD
+ caddr_t ena_svpd;
+ size_t ena_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ efx_piobuf_handle_t ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_piobuf_count;
+ uint32_t ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_pio_write_vi_base;
+ /* Memory BAR mapping regions */
+ uint32_t ena_uc_mem_map_offset;
+ size_t ena_uc_mem_map_size;
+ uint32_t ena_wc_mem_map_offset;
+ size_t ena_wc_mem_map_size;
+ } ef10;
+ } en_arch;
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+};
+
+
+#define EFX_NIC_MAGIC 0x02121996
+
+typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
+ const efx_ev_callbacks_t *, void *);
+
+typedef struct efx_evq_rxq_state_s {
+ unsigned int eers_rx_read_ptr;
+ unsigned int eers_rx_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ unsigned int eers_rx_stream_npackets;
+ boolean_t eers_rx_packed_stream;
+ unsigned int eers_rx_packed_stream_credits;
+#endif
+} efx_evq_rxq_state_t;
+
+struct efx_evq_s {
+ uint32_t ee_magic;
+ efx_nic_t *ee_enp;
+ unsigned int ee_index;
+ unsigned int ee_mask;
+ efsys_mem_t *ee_esmp;
+#if EFSYS_OPT_QSTATS
+ uint32_t ee_stat[EV_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+
+ efx_ev_handler_t ee_rx;
+ efx_ev_handler_t ee_tx;
+ efx_ev_handler_t ee_driver;
+ efx_ev_handler_t ee_global;
+ efx_ev_handler_t ee_drv_gen;
+#if EFSYS_OPT_MCDI
+ efx_ev_handler_t ee_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS];
+
+ uint32_t ee_flags;
+};
+
+#define EFX_EVQ_MAGIC 0x08081997
+
+#define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */
+
+struct efx_rxq_s {
+ uint32_t er_magic;
+ efx_nic_t *er_enp;
+ efx_evq_t *er_eep;
+ unsigned int er_index;
+ unsigned int er_label;
+ unsigned int er_mask;
+ efsys_mem_t *er_esmp;
+};
+
+#define EFX_RXQ_MAGIC 0x15022005
+
+struct efx_txq_s {
+ uint32_t et_magic;
+ efx_nic_t *et_enp;
+ unsigned int et_index;
+ unsigned int et_mask;
+ efsys_mem_t *et_esmp;
+#if EFSYS_OPT_HUNTINGTON
+ uint32_t et_pio_bufnum;
+ uint32_t et_pio_blknum;
+ uint32_t et_pio_write_offset;
+ uint32_t et_pio_offset;
+ size_t et_pio_size;
+#endif
+#if EFSYS_OPT_QSTATS
+ uint32_t et_stat[TX_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+};
+
+#define EFX_TXQ_MAGIC 0x05092005
+
+#define EFX_MAC_ADDR_COPY(_dst, _src) \
+ do { \
+ (_dst)[0] = (_src)[0]; \
+ (_dst)[1] = (_src)[1]; \
+ (_dst)[2] = (_src)[2]; \
+ (_dst)[3] = (_src)[3]; \
+ (_dst)[4] = (_src)[4]; \
+ (_dst)[5] = (_src)[5]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_MAC_BROADCAST_ADDR_SET(_dst) \
+ do { \
+ uint16_t *_d = (uint16_t *)(_dst); \
+ _d[0] = 0xffff; \
+ _d[1] = 0xffff; \
+ _d[2] = 0xffff; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_OPT_CHECK_REG
+#define EFX_CHECK_REG(_enp, _reg) \
+ do { \
+ const char *name = #_reg; \
+ char min = name[4]; \
+ char max = name[5]; \
+ char rev; \
+ \
+ switch ((_enp)->en_family) { \
+ case EFX_FAMILY_SIENA: \
+ rev = 'C'; \
+ break; \
+ \
+ case EFX_FAMILY_HUNTINGTON: \
+ rev = 'D'; \
+ break; \
+ \
+ case EFX_FAMILY_MEDFORD: \
+ rev = 'E'; \
+ break; \
+ \
+ default: \
+ rev = '?'; \
+ break; \
+ } \
+ \
+ EFSYS_ASSERT3S(rev, >=, min); \
+ EFSYS_ASSERT3S(rev, <=, max); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_CHECK_REG(_enp, _reg) do { \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#endif
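+
+/*
+ * Illustrative note: the stringified argument includes the parentheses
+ * added by the EFX_BAR_* wrappers, so for FR_CZ_TX_FILTER_TBL0 the name
+ * is "(FR_CZ_TX_FILTER_TBL0)" and name[4]/name[5] pick out the 'C'..'Z'
+ * revision range, which Siena ('C'), Huntington ('D') and Medford ('E')
+ * all satisfy; an FR_AZ_* register accepts any revision.
+ */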
+
+#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (2 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (3 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Allow drivers to perform optimised 128-bit doorbell writes.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid
+ * the need for locking in the host, and are the only registers known to be
+ * safe to use with 128-bit writes.
+ */
+#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \
+ const char *, \
+ #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \
+ do { \
+ unsigned int _new = (_wptr); \
+ unsigned int _old = (_owptr); \
+ \
+ if ((_new) >= (_old)) \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ (_old) * sizeof (efx_desc_t), \
+ ((_new) - (_old)) * sizeof (efx_desc_t)); \
+ else \
+			/* \
+			 * It is cheaper to sync the entire map than to \
+			 * sync two parts, especially when offset/size \
+			 * are ignored and the entire map is synced in \
+			 * any case. \
+			 */ \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ 0, \
+ (_entries) * sizeof (efx_desc_t)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
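+
+/*
+ * Worked example (illustrative): with a 512-entry ring, advancing the
+ * write pointer from 500 to 510 syncs only descriptors 500..509; advancing
+ * from 510 to 4 wraps the ring (_new < _old), so the whole map is synced
+ * rather than issuing two partial syncs.
+ */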
+
+extern __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high);
+
+extern __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_phy_unprobe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_VPD
+
+/* VPD utility functions */
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keyword,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_DIAG
+
+extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[];
+
+typedef struct efx_register_set_s {
+ unsigned int address;
+ unsigned int step;
+ unsigned int rows;
+ efx_oword_t mask;
+} efx_register_set_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count);
+
+extern __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_MAC_STATS
+
+/*
+ * Closed range of statistics (i.e. both the first and the last are
+ * included). The last must be greater than or equal to the first; they
+ * are equal when the range contains only one item.
+ */
+struct efx_mac_stats_range {
+ efx_mac_stat_t first;
+ efx_mac_stat_t last;
+};
+
+extern efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count);
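+
+/*
+ * Usage sketch (illustrative; the statistic name is an example only): a
+ * single-statistic range sets first == last, e.g.
+ *
+ *	static const struct efx_mac_stats_range rng[] = {
+ *		{ EFX_MAC_RX_OCTETS, EFX_MAC_RX_OCTETS },
+ *	};
+ *	rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, rng, 1);
+ */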
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_IMPL_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_intr.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_intr.c
new file mode 100644
index 00000000..f0422d53
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_intr.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp);
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_intr_ops_t __efx_intr_siena_ops = {
+ siena_intr_init, /* eio_init */
+ siena_intr_enable, /* eio_enable */
+ siena_intr_disable, /* eio_disable */
+ siena_intr_disable_unlocked, /* eio_disable_unlocked */
+ siena_intr_trigger, /* eio_trigger */
+ siena_intr_status_line, /* eio_status_line */
+ siena_intr_status_message, /* eio_status_message */
+ siena_intr_fatal, /* eio_fatal */
+ siena_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_intr_ops_t __efx_intr_ef10_ops = {
+ ef10_intr_init, /* eio_init */
+ ef10_intr_enable, /* eio_enable */
+ ef10_intr_disable, /* eio_disable */
+ ef10_intr_disable_unlocked, /* eio_disable_unlocked */
+ ef10_intr_trigger, /* eio_trigger */
+ ef10_intr_status_line, /* eio_status_line */
+ ef10_intr_status_message, /* eio_status_message */
+ ef10_intr_fatal, /* eio_fatal */
+ ef10_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_INTR) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ eip->ei_esmp = esmp;
+ eip->ei_type = type;
+ eip->ei_level = 0;
+
+ enp->en_mod_flags |= EFX_MOD_INTR;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eiop = &__efx_intr_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = eiop->eio_init(enp, type, esmp)) != 0)
+ goto fail3;
+
+ eip->ei_eiop = eiop;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
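+
+/*
+ * Illustrative usage sketch only (hypothetical caller, not a documented
+ * contract): the dispatch routines below assert EFX_MOD_INTR, so they may
+ * only be called between efx_intr_init() and efx_intr_fini(), e.g.:
+ *
+ *	efx_intr_init(enp, EFX_INTR_MESSAGE, esmp);
+ *	efx_intr_enable(enp);
+ *	...
+ *	efx_intr_disable(enp);
+ *	efx_intr_fini(enp);
+ */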
+
+ void
+efx_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_INTR;
+}
+
+ void
+efx_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_enable(enp);
+}
+
+ void
+efx_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable(enp);
+}
+
+ void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable_unlocked(enp);
+}
+
+
+ __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ return (eiop->eio_trigger(enp, level));
+}
+
+ void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_line(enp, fatalp, qmaskp);
+}
+
+ void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_message(enp, message, fatalp);
+}
+
+ void
+efx_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fatal(enp);
+}
+
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+/* ************************************************************************* */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ /*
+ * bug17213 workaround.
+ *
+ * Under legacy interrupts, don't share a level between fatal
+ * interrupts and event queue interrupts. Under MSI-X, they
+ * must share, or we won't get an interrupt.
+ */
+ if (enp->en_family == EFX_FAMILY_SIENA &&
+ eip->ei_type == EFX_INTR_LINE)
+ eip->ei_level = 0x1f;
+ else
+ eip->ei_level = 0;
+
+ /* Enable all the genuinely fatal interrupts */
+ EFX_SET_OWORD(oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
+ if (enp->en_family >= EFX_FAMILY_SIENA)
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);
+
+ /* Set up the interrupt address register */
+ EFX_POPULATE_OWORD_3(oword,
+ FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
+ FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
+ FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+
+ return (0);
+}
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+}
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFSYS_SPIN(10);
+}
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+}
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ unsigned int count;
+ uint32_t sel;
+ efx_rc_t rc;
+
+ /* bug16757: No event queues can be initialized */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ if (level >= EFX_NINTR_SIENA) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
+ return (ENOTSUP); /* avoid EFSYS_PROBE() */
+
+ sel = level;
+
+ /* Trigger a test interrupt */
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ /*
+ * Wait up to 100ms for the interrupt to be raised before restoring
+ * KER_INT_LEVE_SEL. Ignore a failure to raise (the caller will
+ * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL
+ */
+ count = 0;
+ do {
+ EFSYS_SPIN(100); /* 100us */
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efsys_mem_t *esmp = eip->ei_esmp;
+ efx_oword_t oword;
+
+ /* Read the syndrome */
+ EFSYS_MEM_READO(esmp, 0, &oword);
+
+ if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) {
+ EFSYS_PROBE(fatal);
+
+ /* Clear the fatal interrupt condition */
+ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0);
+ EFSYS_MEM_WRITEO(esmp, 0, &oword);
+
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_dword_t dword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /*
+ * Read the queue mask and implicitly acknowledge the
+ * interrupt.
+ */
+ EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ if (*qmaskp & (1U << eip->ei_level))
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (message == eip->ei_level)
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_DECODE_INTR_FATAL
+ efx_oword_t fatal;
+ efx_oword_t mem_per;
+
+ EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal);
+ EFX_ZERO_OWORD(mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 ||
+ EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+#else
+ EFSYS_ASSERT(0);
+#endif
+}
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /* Clear the interrupt address register */
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_lic.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_lic.c
new file mode 100644
index 00000000..2cd05cc8
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_lic.c
@@ -0,0 +1,1751 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_LICENSING
+
+#include "ef10_tlv_layout.h"
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static const efx_lic_ops_t __efx_lic_v1_ops = {
+ efx_mcdi_fc_license_update_license, /* elo_update_licenses */
+ efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */
+ NULL, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static const efx_lic_ops_t __efx_lic_v2_ops = {
+ efx_mcdi_licensing_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */
+ efx_mcdi_licensed_app_state, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+static const efx_lic_ops_t __efx_lic_v3_ops = {
+ efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */
+ efx_mcdi_licensing_v3_app_state, /* elo_app_state */
+ efx_mcdi_licensing_v3_get_id, /* elo_get_id */
+ efx_lic_v3_find_start, /* elo_find_start*/
+ efx_lic_v3_find_end, /* elo_find_end */
+ efx_lic_v3_find_key, /* elo_find_key */
+ efx_lic_v3_validate_key, /* elo_validate_key */
+ efx_lic_v3_read_key, /* elo_read_key */
+ efx_lic_v3_write_key, /* elo_write_key */
+ efx_lic_v3_delete_key, /* elo_delete_key */
+ efx_lic_v3_create_partition, /* elo_create_partition */
+ efx_lic_v3_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+/* V1 Licensing - used in Siena Modena only */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
+ MC_CMD_FC_OUT_LICENSE_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_GET_KEY_STATS);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS);
+ eksp->eks_unverifiable = 0;
+ eksp->eks_wrong_node = 0;
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+/* V1 and V2 Partition format - based on a 16-bit TLV format */
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+/*
+ * V1/V2 format - defined in SF-108542-TC section 4.2:
+ * Type (T): 16bit - revision/HMAC algorithm
+ * Length (L): 16bit - value length in bytes
+ * Value (V): L bytes - payload
+ */
+#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
+#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof(uint16_t))
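+
+/*
+ * Illustrative layout only (hypothetical values, not taken from SF-108542-TC):
+ * a key carrying a 4-byte payload occupies 8 bytes in the partition buffer,
+ * with both header words stored little-endian:
+ *
+ *	offset 0: T = 0x0001	(any non-zero type passes
+ *				 efx_lic_v1v2_validate_key below)
+ *	offset 2: L = 0x0004	(payload length in bytes)
+ *	offset 4: V[0..3]	(payload)
+ *
+ * efx_lic_v1v2_find_key() reports such a key with a total length of
+ * L + EFX_LICENSE_V1V2_HEADER_LENGTH, and a header with T == 0 and L == 0 is
+ * treated as the partition terminator.
+ */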
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *startp = 0;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ return (0);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ boolean_t found;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH)
+ goto fail1;
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[1]);
+ if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) ||
+ (tlv_type == 0 && tlv_length == 0)) {
+ found = B_FALSE;
+ } else {
+ *startp = offset;
+ *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ found = B_TRUE;
+ }
+ return (found);
+
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) {
+ goto fail1;
+ }
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)keyp)[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)keyp)[1]);
+
+ if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) {
+ goto fail2;
+ }
+ if (tlv_type == 0) {
+ goto fail3;
+ }
+ if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) {
+ goto fail4;
+ }
+
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ if (key_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ memcpy(keyp, &bufferp[offset], length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ /* Ensure space for terminator remains */
+ if ((offset + length) >
+ (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(bufferp + offset, keyp, length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ uint32_t move_start = offset + length;
+ uint32_t move_length = end - move_start;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(end <= buffer_size);
+
+ /* Shift everything after the key down */
+ memmove(bufferp + offset, bufferp + move_start, move_length);
+
+ *deltap = length;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
+
+ /* Write terminator */
+ memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH);
+ return (0);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+/* V2 Licensing - used by Huntington family only. See SF-113611-TC */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ /* V2 licensing supports 32bit app id only */
+ if ((app_id >> 32) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID,
+ app_id & 0xffffffff);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
+ MC_CMD_LICENSING_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS);
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS);
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+/* V3 Licensing - used starting from Medford family. See SF-114884-SW */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
+ MC_CMD_LICENSING_V3_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS);
+ eksp->eks_blacklisted = 0;
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS);
+ eksp->eks_licensed_apps_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO);
+ eksp->eks_licensed_apps_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI);
+ eksp->eks_licensed_features_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO);
+ eksp->eks_licensed_features_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO,
+ app_id & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI,
+ app_id >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
+
+	if (bufferp == NULL) {
+		/* Request id type and length only; use the local payload */
+		req.emr_in_buf = payload;
+		req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+		req.emr_out_buf = payload;
+		req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+		(void) memset(payload, 0, sizeof (payload));
+ } else {
+ /* Request full buffer */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
+ (void) memset(bufferp, 0, req.emr_out_length);
+ }
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
+ *lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
+
+ if (bufferp == NULL) {
+		/*
+		 * Modify length requirements to indicate to the caller the
+		 * extra buffering needed to read the complete output.
+		 */
+ *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ } else {
+ /* Shift ID down to start of buffer */
+ memmove(bufferp,
+ bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
+ *lengthp);
+ memset(bufferp + (*lengthp), 0,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
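+
+/*
+ * Illustrative usage only (hypothetical caller code): the handler above is
+ * intended to be called twice - first with bufferp == NULL to learn the
+ * license ID type and the buffer size to allocate, then again with a buffer
+ * of at least that size to fetch the ID itself:
+ *
+ *	uint32_t type;
+ *	size_t length;
+ *
+ *	efx_mcdi_licensing_v3_get_id(enp, 0, &type, &length, NULL);
+ *	(allocate "length" bytes at idp)
+ *	efx_mcdi_licensing_v3_get_id(enp, length, &type, &length, idp);
+ *
+ * Drivers normally reach this through the efx_lic_get_id() wrapper rather
+ * than calling the static function directly.
+ */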
+
+/* V3 format uses Huntington TLV format partition. See SF-108797-SW */
+#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
+#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160)
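+
+/*
+ * Summary of the checks applied by efx_lic_v3_validate_key() below (byte
+ * offsets are taken from that routine rather than from SF-108797-SW):
+ *
+ *	byte 0: key type, must be >= 3 for a V3 key
+ *	byte 1: encoded key length, must not exceed the supplied length
+ *	total:	between EFX_LICENSE_V3_KEY_LENGTH_MIN and
+ *		EFX_LICENSE_V3_KEY_LENGTH_MAX bytes inclusive
+ */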
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item(bufferp, buffer_size,
+ offset, startp, lengthp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ /* Check key is a valid V3 key */
+ uint8_t key_type;
+ uint8_t key_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) {
+ goto fail1;
+ }
+
+ if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) {
+ goto fail2;
+ }
+
+ key_type = ((uint8_t *)keyp)[0];
+ key_length = ((uint8_t *)keyp)[1];
+
+ if (key_type < 3) {
+ goto fail3;
+ }
+ if (key_length > length) {
+ goto fail4;
+ }
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_get_item(bufferp, buffer_size,
+ offset, length, keyp, key_max_size, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
+
+ return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
+ offset, keyp, length, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = ef10_nvram_buffer_delete_item(bufferp,
+ buffer_size, offset, length, end)) != 0) {
+ goto fail1;
+ }
+
+ *deltap = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ /* Construct empty partition */
+ if ((rc = ef10_nvram_buffer_create(enp,
+ NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_buffer_finish(bufferp,
+ buffer_size)) != 0) {
+ goto fail1;
+ }
+
+ /* Validate completed partition */
+ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop;
+ efx_key_stats_t eks;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC));
+
+ switch (enp->en_family) {
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ elop = &__efx_lic_v1_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ elop = &__efx_lic_v2_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_elop = elop;
+ enp->en_mod_flags |= EFX_MOD_LIC;
+
+ /* Probe for support */
+ if (efx_lic_get_key_stats(enp, &eks) == 0) {
+ enp->en_licensing_supported = B_TRUE;
+ } else {
+ enp->en_licensing_supported = B_FALSE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+	return (enp->en_licensing_supported);
+}
+
+ void
+efx_lic_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ enp->en_elop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_LIC;
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_update_licenses(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_app_state == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_get_id == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_get_id(enp, buffer_size, typep,
+ lengthp, bufferp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Buffer management API - abstracts varying TLV format used for License partition */
+
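+/*
+ * Illustrative call sequence only (hypothetical caller; the variable names
+ * are not part of this API): enumerating the keys held in a license
+ * partition buffer typically looks like:
+ *
+ *	uint32_t offset, start, length, key_length, end;
+ *
+ *	efx_lic_find_start(enp, bufferp, buffer_size, &offset);
+ *	while (efx_lic_find_key(enp, bufferp, buffer_size, offset,
+ *	    &start, &length) != B_FALSE) {
+ *		efx_lic_read_key(enp, bufferp, buffer_size, start, length,
+ *		    keyp, key_max_size, &key_length);
+ *		offset = start + length;
+ *	}
+ *	efx_lic_find_end(enp, bufferp, buffer_size, offset, &end);
+ *
+ * Each call dispatches to the V1/V2 or V3 format handler selected by
+ * efx_lic_init() for the NIC family.
+ */
+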
+ __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ EFSYS_ASSERT(bufferp);
+ EFSYS_ASSERT(startp);
+ EFSYS_ASSERT(lengthp);
+
+ return (elop->elo_find_key(enp, bufferp, buffer_size, offset,
+ startp, lengthp));
+}
+
+
+/*
+ * Validate that the buffer contains a single key in a recognised format.
+ * An empty or terminator buffer is not accepted as a valid key.
+ */
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset,
+ length, keyp, key_max_size, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset,
+ keyp, length, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset,
+ length, end, deltap)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LICENSING */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_mac.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_mac.c
new file mode 100644
index 00000000..752e7205
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_mac.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_mac_ops_t __efx_siena_mac_ops = {
+ siena_mac_poll, /* emo_poll */
+ siena_mac_up, /* emo_up */
+ siena_mac_reconfigure, /* emo_addr_set */
+ siena_mac_reconfigure, /* emo_pdu_set */
+ siena_mac_pdu_get, /* emo_pdu_get */
+ siena_mac_reconfigure, /* emo_reconfigure */
+ siena_mac_multicast_list_set, /* emo_multicast_list_set */
+	NULL,				/* emo_filter_default_rxq_set */
+ NULL, /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ siena_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ siena_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ siena_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_mac_ops_t __efx_ef10_mac_ops = {
+ ef10_mac_poll, /* emo_poll */
+ ef10_mac_up, /* emo_up */
+ ef10_mac_addr_set, /* emo_addr_set */
+ ef10_mac_pdu_set, /* emo_pdu_set */
+ ef10_mac_pdu_get, /* emo_pdu_get */
+ ef10_mac_reconfigure, /* emo_reconfigure */
+ ef10_mac_multicast_list_set, /* emo_multicast_list_set */
+ ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */
+ ef10_mac_filter_default_rxq_clear,
+ /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ ef10_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ ef10_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ ef10_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint32_t old_pdu;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (pdu < EFX_MAC_PDU_MIN) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (pdu > EFX_MAC_PDU_MAX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ old_pdu = epp->ep_mac_pdu;
+ epp->ep_mac_pdu = (uint32_t)pdu;
+ if ((rc = emop->emo_pdu_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ epp->ep_mac_pdu = old_pdu;
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ if ((rc = emop->emo_pdu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t old_addr[6];
+ uint32_t oui;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (EFX_MAC_ADDR_IS_MULTICAST(addr)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ oui = addr[0] << 16 | addr[1] << 8 | addr[2];
+ if (oui == 0x000000) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr);
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr);
+ if ((rc = emop->emo_addr_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ boolean_t old_all_unicst;
+ boolean_t old_mulcst;
+ boolean_t old_all_mulcst;
+ boolean_t old_brdcst;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ old_all_unicst = epp->ep_all_unicst;
+ old_mulcst = epp->ep_mulcst;
+ old_all_mulcst = epp->ep_all_mulcst;
+ old_brdcst = epp->ep_brdcst;
+
+ epp->ep_all_unicst = all_unicst;
+ epp->ep_mulcst = mulcst;
+ epp->ep_all_mulcst = all_mulcst;
+ epp->ep_brdcst = brdcst;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_all_unicst = old_all_unicst;
+ epp->ep_mulcst = old_mulcst;
+ epp->ep_all_mulcst = old_all_mulcst;
+ epp->ep_brdcst = old_brdcst;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (epp->ep_mac_drain == enabled)
+ return (0);
+
+ epp->ep_mac_drain = enabled;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((rc = emop->emo_up(enp, mac_upp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ unsigned int old_fcntl;
+ boolean_t old_autoneg;
+ unsigned int old_adv_cap;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ignore a request to set flow control auto-negotiation
+ * if the PHY doesn't support it.
+ */
+ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ autoneg = B_FALSE;
+
+ old_fcntl = epp->ep_fcntl;
+ old_autoneg = epp->ep_fcntl_autoneg;
+ old_adv_cap = epp->ep_adv_cap_mask;
+
+ epp->ep_fcntl = fcntl;
+ epp->ep_fcntl_autoneg = autoneg;
+
+ /*
+	 * Always encode the flow control settings in the advertised
+	 * capabilities (even if we are not trying to auto-negotiate
+	 * them), and reconfigure both the PHY and the MAC.
+ */
+ if (fcntl & EFX_FCNTL_RESPOND)
+ epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+ else
+ epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+
+ if (fcntl & EFX_FCNTL_GENERATE)
+ epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM);
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_fcntl = old_fcntl;
+ epp->ep_fcntl_autoneg = old_autoneg;
+ epp->ep_adv_cap_mask = old_adv_cap;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int wanted = 0;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ /*
+ * Decode the requested flow control settings from the PHY
+ * advertised capabilities.
+ */
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE))
+ wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM))
+ wanted ^= EFX_FCNTL_GENERATE;
+
+ *fcntl_linkp = epp->ep_fcntl;
+ *fcntl_wantedp = wanted;
+}
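+
+/*
+ * For reference, the encoding used by efx_mac_fcntl_set() above (and
+ * decoded by efx_mac_fcntl_get()) maps the requested flow control onto
+ * the PAUSE/ASYM advertisement bits as follows:
+ *
+ *	fcntl				PAUSE	ASYM
+ *	RESPOND | GENERATE		  1	  0
+ *	RESPOND only			  1	  1
+ *	GENERATE only			  0	  1
+ *	neither				  0	  0
+ */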
+
+ __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t *old_mulcst_addr_list = NULL;
+ uint32_t old_mulcst_addr_count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (count > EFX_MAC_MULTICAST_LIST_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ old_mulcst_addr_count = epp->ep_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ /* Allocate memory to store old list (instead of using stack) */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ if (old_mulcst_addr_list == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ /* Save the old list in case we need to rollback */
+ memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+ }
+
+ /* Store the new list */
+ memcpy(epp->ep_mulcst_addr_list, addrs,
+ count * EFX_MAC_ADDR_LEN);
+ epp->ep_mulcst_addr_count = count;
+
+ if ((rc = emop->emo_multicast_list_set(enp)) != 0)
+ goto fail3;
+
+ if (old_mulcst_addr_count > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ /* Restore original list on failure */
+ epp->ep_mulcst_addr_count = old_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_set != NULL) {
+ rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss);
+ if (rc != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_clear != NULL)
+ emop->emo_filter_default_rxq_clear(enp);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */
+static const char * const __efx_mac_stat_name[] = {
+ "rx_octets",
+ "rx_pkts",
+ "rx_unicst_pkts",
+ "rx_multicst_pkts",
+ "rx_brdcst_pkts",
+ "rx_pause_pkts",
+ "rx_le_64_pkts",
+ "rx_65_to_127_pkts",
+ "rx_128_to_255_pkts",
+ "rx_256_to_511_pkts",
+ "rx_512_to_1023_pkts",
+ "rx_1024_to_15xx_pkts",
+ "rx_ge_15xx_pkts",
+ "rx_errors",
+ "rx_fcs_errors",
+ "rx_drop_events",
+ "rx_false_carrier_errors",
+ "rx_symbol_errors",
+ "rx_align_errors",
+ "rx_internal_errors",
+ "rx_jabber_pkts",
+ "rx_lane0_char_err",
+ "rx_lane1_char_err",
+ "rx_lane2_char_err",
+ "rx_lane3_char_err",
+ "rx_lane0_disp_err",
+ "rx_lane1_disp_err",
+ "rx_lane2_disp_err",
+ "rx_lane3_disp_err",
+ "rx_match_fault",
+ "rx_nodesc_drop_cnt",
+ "tx_octets",
+ "tx_pkts",
+ "tx_unicst_pkts",
+ "tx_multicst_pkts",
+ "tx_brdcst_pkts",
+ "tx_pause_pkts",
+ "tx_le_64_pkts",
+ "tx_65_to_127_pkts",
+ "tx_128_to_255_pkts",
+ "tx_256_to_511_pkts",
+ "tx_512_to_1023_pkts",
+ "tx_1024_to_15xx_pkts",
+ "tx_ge_15xx_pkts",
+ "tx_errors",
+ "tx_sgl_col_pkts",
+ "tx_mult_col_pkts",
+ "tx_ex_col_pkts",
+ "tx_late_col_pkts",
+ "tx_def_pkts",
+ "tx_ex_def_pkts",
+ "pm_trunc_bb_overflow",
+ "pm_discard_bb_overflow",
+ "pm_trunc_vfifo_full",
+ "pm_discard_vfifo_full",
+ "pm_trunc_qbb",
+ "pm_discard_qbb",
+ "pm_discard_mapping",
+ "rxdp_q_disabled_pkts",
+ "rxdp_di_dropped_pkts",
+ "rxdp_streaming_pkts",
+ "rxdp_hlb_fetch",
+ "rxdp_hlb_wait",
+ "vadapter_rx_unicast_packets",
+ "vadapter_rx_unicast_bytes",
+ "vadapter_rx_multicast_packets",
+ "vadapter_rx_multicast_bytes",
+ "vadapter_rx_broadcast_packets",
+ "vadapter_rx_broadcast_bytes",
+ "vadapter_rx_bad_packets",
+ "vadapter_rx_bad_bytes",
+ "vadapter_rx_overflow",
+ "vadapter_tx_unicast_packets",
+ "vadapter_tx_unicast_bytes",
+ "vadapter_tx_multicast_packets",
+ "vadapter_tx_multicast_bytes",
+ "vadapter_tx_broadcast_packets",
+ "vadapter_tx_broadcast_bytes",
+ "vadapter_tx_bad_packets",
+ "vadapter_tx_bad_bytes",
+ "vadapter_tx_overflow",
+};
+/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
+
+ __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS);
+ return (__efx_mac_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+static efx_rc_t
+efx_mac_stats_mask_add_range(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in const struct efx_mac_stats_range *rngp)
+{
+ unsigned int mask_npages = mask_size / sizeof (*maskp);
+ unsigned int el;
+ unsigned int el_min;
+ unsigned int el_max;
+ unsigned int low;
+ unsigned int high;
+ unsigned int width;
+ efx_rc_t rc;
+
+ if ((mask_npages * EFX_MAC_STATS_MASK_BITS_PER_PAGE) <=
+ (unsigned int)rngp->last) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(rngp->first, <=, rngp->last);
+ EFSYS_ASSERT3U(rngp->last, <, EFX_MAC_NSTATS);
+
+ for (el = 0; el < mask_npages; ++el) {
+ el_min = el * EFX_MAC_STATS_MASK_BITS_PER_PAGE;
+ el_max =
+ el_min + (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1);
+ if ((unsigned int)rngp->first > el_max ||
+ (unsigned int)rngp->last < el_min)
+ continue;
+ low = MAX((unsigned int)rngp->first, el_min);
+ high = MIN((unsigned int)rngp->last, el_max);
+ width = high - low + 1;
+ maskp[el] |=
+ (width == EFX_MAC_STATS_MASK_BITS_PER_PAGE) ?
+ (~0ULL) : (((1ULL << width) - 1) << (low - el_min));
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
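+
+/*
+ * Worked example of the per-page arithmetic above, assuming 32 mask bits
+ * per uint32_t page (EFX_MAC_STATS_MASK_BITS_PER_PAGE): the range
+ * { .first = 30, .last = 33 } sets bits 30-31 of maskp[0] (low = 30,
+ * high = 31, width = 2) and bits 0-1 of maskp[1] (low = 32, high = 33,
+ * width = 2).
+ */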
+
+ efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count)
+{
+ unsigned int i;
+ efx_rc_t rc;
+
+ for (i = 0; i < rng_count; ++i) {
+ if ((rc = efx_mac_stats_mask_add_range(maskp, mask_size,
+ &rngp[i])) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(maskp != NULL);
+ EFSYS_ASSERT(mask_size % sizeof (maskp[0]) == 0);
+
+ (void) memset(maskp, 0, mask_size);
+
+ if ((rc = emop->emo_stats_get_mask(enp, maskp, mask_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if ((rc = emop->emo_stats_clear(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ /*
+ * Don't assert !ep_mac_stats_pending, because the client might
+ * have failed to finalise statistics when previously stopping
+ * the port.
+ */
+ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
+ goto fail1;
+
+ epp->ep_mac_stats_pending = B_TRUE;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+
+ if (emop->emo_stats_periodic == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp,
+ __inout_opt uint32_t *generationp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ rc = emop->emo_stats_update(enp, esmp, essp, generationp);
+ if (rc == 0)
+ epp->ep_mac_stats_pending = B_FALSE;
+
+ return (rc);
+}
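+
+/*
+ * Typical statistics flow (sketch only; esmp is assumed to be a host DMA
+ * buffer large enough for the MAC statistics block, see EFX_MAC_STATS_SIZE
+ * in efx.h):
+ *
+ *	(void) efx_mac_stats_upload(enp, esmp);
+ *	... wait for the DMA to complete (poll, or a MAC stats event) ...
+ *	(void) efx_mac_stats_update(enp, esmp, stats, &generation);
+ *
+ * ep_mac_stats_pending is set by the upload and cleared by a successful
+ * update, mirroring this handshake.
+ */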
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_type_t type = EFX_MAC_INVALID;
+ const efx_mac_ops_t *emop;
+ int rc = EINVAL;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emop = &__efx_siena_mac_ops;
+ type = EFX_MAC_SIENA;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_HUNTINGTON;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_MEDFORD;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT(type != EFX_MAC_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
+ EFSYS_ASSERT(emop != NULL);
+
+ epp->ep_emop = emop;
+ epp->ep_mac_type = type;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_SIENA
+
+#define EFX_MAC_HASH_BITS (1 << 8)
+
+/* Compute the multicast hash as used on Falcon and Siena. */
+static void
+siena_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high)
+{
+ uint32_t crc, index;
+ int i;
+
+ EFSYS_ASSERT(hash_low != NULL);
+ EFSYS_ASSERT(hash_high != NULL);
+
+ EFX_ZERO_OWORD(*hash_low);
+ EFX_ZERO_OWORD(*hash_high);
+
+ for (i = 0; i < count; i++) {
+ /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */
+ crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN);
+ index = crc % EFX_MAC_HASH_BITS;
+ if (index < 128) {
+ EFX_SET_OWORD_BIT(*hash_low, index);
+ } else {
+ EFX_SET_OWORD_BIT(*hash_high, index - 128);
+ }
+
+ addrs += EFX_MAC_ADDR_LEN;
+ }
+}
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_oword_t old_hash[2];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
+
+ siena_mac_multicast_hash_compute(
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count,
+ &epp->ep_multicst_hash[0],
+ &epp->ep_multicst_hash[1]);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash));
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.c
new file mode 100644
index 00000000..c61b943c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.c
@@ -0,0 +1,2346 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MCDI
+
+/*
+ * There are three versions of the MCDI interface:
+ * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
+ * - MCDIv1: Siena firmware and Huntington BootROM.
+ * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
+ * Transport uses MCDIv2 headers.
+ *
+ * MCDIv2 Header NOT_EPOCH flag
+ * ----------------------------
+ * A new epoch begins at initial startup or after an MC reboot, and defines when
+ * the MC should reject stale MCDI requests.
+ *
+ * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
+ * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
+ *
+ * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
+ * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
+ */
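+
+/*
+ * Within this file the epoch state is carried in emi_new_epoch:
+ *
+ *	efx_mcdi_new_epoch() / efx_mcdi_ev_death()  -> emi_new_epoch = B_TRUE
+ *	efx_mcdi_request_start()                    -> NOT_EPOCH = new_epoch ? 0 : 1
+ *	efx_mcdi_request_poll() (request completed) -> emi_new_epoch = B_FALSE
+ */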
+
+
+
+#if EFSYS_OPT_SIENA
+
+static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
+ siena_mcdi_init, /* emco_init */
+ siena_mcdi_send_request, /* emco_send_request */
+ siena_mcdi_poll_reboot, /* emco_poll_reboot */
+ siena_mcdi_poll_response, /* emco_poll_response */
+ siena_mcdi_read_response, /* emco_read_response */
+ siena_mcdi_fini, /* emco_fini */
+ siena_mcdi_feature_supported, /* emco_feature_supported */
+ siena_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
+ ef10_mcdi_init, /* emco_init */
+ ef10_mcdi_send_request, /* emco_send_request */
+ ef10_mcdi_poll_reboot, /* emco_poll_reboot */
+ ef10_mcdi_poll_response, /* emco_poll_response */
+ ef10_mcdi_read_response, /* emco_read_response */
+ ef10_mcdi_fini, /* emco_fini */
+ ef10_mcdi_feature_supported, /* emco_feature_supported */
+ ef10_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ const efx_mcdi_ops_t *emcop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emcop = &__efx_mcdi_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
+ /* MCDI requires a DMA buffer in host memory */
+ if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ }
+ enp->en_mcdi.em_emtp = emtp;
+
+ if (emcop != NULL && emcop->emco_init != NULL) {
+ if ((rc = emcop->emco_init(enp, emtp)) != 0)
+ goto fail3;
+ }
+
+ enp->en_mcdi.em_emcop = emcop;
+ enp->en_mod_flags |= EFX_MOD_MCDI;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mcdi.em_emtp = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+
+ return (rc);
+}
+
+ void
+efx_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
+
+ if (emcop != NULL && emcop->emco_fini != NULL)
+ emcop->emco_fini(enp);
+
+ emip->emi_port = 0;
+ emip->emi_aborted = 0;
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+}
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_lock_state_t state;
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emip->emi_new_epoch = B_TRUE;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static void
+efx_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
+}
+
+static efx_rc_t
+efx_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ rc = emcop->emco_poll_reboot(enp);
+ return (rc);
+}
+
+static boolean_t
+efx_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ boolean_t available;
+
+ available = emcop->emco_poll_response(enp);
+ return (available);
+}
+
+static void
+efx_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_read_response(enp, bufferp, offset, length);
+}
+
+ void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ size_t hdr_len;
+ unsigned int max_version;
+ unsigned int seq;
+ unsigned int xflags;
+ boolean_t new_epoch;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_request_start() is naturally serialised against both
+ * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
+ * by virtue of there only being one outstanding MCDI request.
+ * Unfortunately, upper layers may also call efx_mcdi_request_abort()
+	 * at any time, to time out a pending MCDI request. That request may
+ * then subsequently complete, meaning efx_mcdi_ev_cpl() or
+ * efx_mcdi_ev_death() may end up running in parallel with
+ * efx_mcdi_request_start(). This race is handled by ensuring that
+ * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
+ * en_eslp lock.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ EFSYS_ASSERT(emip->emi_pending_req == NULL);
+ emip->emi_pending_req = emrp;
+ emip->emi_ev_cpl = ev_cpl;
+ emip->emi_poll_cnt = 0;
+ seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
+ new_epoch = emip->emi_new_epoch;
+ max_version = emip->emi_max_version;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ xflags = 0;
+ if (ev_cpl)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ /*
+ * Huntington firmware supports MCDIv2, but the Huntington BootROM only
+ * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
+ * possible to support this.
+ */
+ if ((max_version >= 2) &&
+ ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
+ (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
+ /* Construct MCDI v2 header */
+ hdr_len = sizeof (hdr);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
+ } else {
+ /* Construct MCDI v1 header */
+ hdr_len = sizeof (hdr[0]);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, emrp->emr_cmd,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, emrp->emr_in_length,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
+ &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ efx_mcdi_send_request(enp, &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+}
+
+
+static void
+efx_mcdi_read_response_header(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ unsigned int data_len;
+ unsigned int seq;
+ unsigned int cmd;
+ unsigned int error;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emrp != NULL);
+
+ efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
+ hdr_len = sizeof (hdr[0]);
+
+ cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
+ seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
+ error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
+
+ if (cmd != MC_CMD_V2_EXTN) {
+ data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
+ } else {
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
+ data_len =
+ EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if (error && (data_len == 0)) {
+ /* The MC has rebooted since the request was sent. */
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ efx_mcdi_poll_reboot(enp);
+ rc = EIO;
+ goto fail1;
+ }
+ if ((cmd != emrp->emr_cmd) ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ /* Response is for a different request */
+ rc = EIO;
+ goto fail2;
+ }
+ if (error) {
+ efx_dword_t err[2];
+ unsigned int err_len = MIN(data_len, sizeof (err));
+ int err_code = MC_CMD_ERR_EPROTO;
+ int err_arg = 0;
+
+ /* Read error code (and arg num for MCDI v2 commands) */
+ efx_mcdi_read_response(enp, &err, hdr_len, err_len);
+
+ if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
+ err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
+#ifdef WITH_MCDI_V2
+ if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
+ err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
+#endif
+ emrp->emr_err_code = err_code;
+ emrp->emr_err_arg = err_arg;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
+ (err_len == sizeof (err))) {
+ /*
+ * The MCDI request would normally fail with EPERM, but
+ * firmware has forwarded it to an authorization agent
+ * attached to a privileged PF.
+ *
+ * Save the authorization request handle. The client
+ * must wait for a PROXY_RESPONSE event, or timeout.
+ */
+ emrp->emr_proxy_handle = err_arg;
+ }
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ &err[0], err_len);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
+ int, err_code, int, err_arg);
+ }
+
+ rc = efx_mcdi_request_errcode(err_code);
+ goto fail3;
+ }
+
+ emrp->emr_rc = 0;
+ emrp->emr_out_length_used = data_len;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ emrp->emr_proxy_handle = 0;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+ return;
+
+fail3:
+fail2:
+fail1:
+ emrp->emr_rc = rc;
+ emrp->emr_out_length_used = 0;
+}
+
+static void
+efx_mcdi_finish_response(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ size_t bytes;
+
+ if (emrp->emr_out_buf == NULL)
+ return;
+
+ /* Read the command header to detect MCDI response format */
+ hdr_len = sizeof (hdr[0]);
+ efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
+ if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
+ /*
+ * Read the actual payload length. The length given in the event
+ * is only correct for responses with the V1 format.
+ */
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ /* Copy payload out into caller supplied buffer */
+ bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
+ efx_mcdi_read_response(enp, emrp->emr_out_buf, hdr_len, bytes);
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ emrp->emr_out_buf, bytes);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+}
+
+
+ __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /* Serialise against post-watchdog efx_mcdi_ev* */
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ EFSYS_ASSERT(emip->emi_pending_req != NULL);
+ EFSYS_ASSERT(!emip->emi_ev_cpl);
+ emrp = emip->emi_pending_req;
+
+ /* Check for reboot atomically w.r.t efx_mcdi_request_start */
+ if (emip->emi_poll_cnt++ == 0) {
+ if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ /* Reboot/Assertion */
+ if (rc == EIO || rc == EINTR)
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ goto fail1;
+ }
+ }
+
+ /* Check if a response is available */
+ if (efx_mcdi_poll_response(enp) == B_FALSE) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (B_FALSE);
+ }
+
+ /* Read the response header */
+ efx_mcdi_read_response_header(enp, emrp);
+
+ /* Request complete */
+ emip->emi_pending_req = NULL;
+
+ /* Ensure stale MCDI requests fail after an MC reboot. */
+ emip->emi_new_epoch = B_FALSE;
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if ((rc = emrp->emr_rc) != 0)
+ goto fail2;
+
+ efx_mcdi_finish_response(enp, emrp);
+ return (B_TRUE);
+
+fail2:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE(fail2);
+fail1:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (B_TRUE);
+}
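+
+/*
+ * Illustrative poll-mode usage sketch (assumptions: the request was started
+ * with ev_cpl == B_FALSE, timeout_us comes from efx_mcdi_get_timeout(), and
+ * delay_us()/elapsed_us()/poll_interval_us are host-supplied helpers, not
+ * part of libefx):
+ *
+ *	efx_mcdi_request_start(enp, emrp, B_FALSE);
+ *	while (!efx_mcdi_request_poll(enp)) {
+ *		if (elapsed_us() > timeout_us) {
+ *			(void) efx_mcdi_request_abort(enp);
+ *			break;
+ *		}
+ *		delay_us(poll_interval_us);
+ *	}
+ *	rc = emrp->emr_rc;
+ *
+ * After a successful abort emr_rc is ETIMEDOUT, so the final read of
+ * emr_rc covers both the completed and the timed-out case.
+ */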
+
+ __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ boolean_t aborted;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_ev_* may have already completed this event, and be
+ * spinning/blocked on the upper layer lock. So it *is* legitimate
+	 * for emi_pending_req to be NULL. If there is a pending
+	 * event-completed request, then provide a "credit" to allow
+ * efx_mcdi_ev_cpl() to accept a single spurious completion.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ aborted = (emrp != NULL);
+ if (aborted) {
+ emip->emi_pending_req = NULL;
+
+ /* Error the request */
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = ETIMEDOUT;
+
+		/* Provide a credit for seqno/emi_pending_req mismatches */
+ if (emip->emi_ev_cpl)
+ ++emip->emi_aborted;
+
+ /*
+ * The upper layer has called us, so we don't
+ * need to complete the request.
+ */
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (aborted);
+}
+
+ void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_get_timeout(enp, emrp, timeoutp);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err)
+{
+
+ switch (err) {
+ /* MCDI v1 */
+ case MC_CMD_ERR_EPERM:
+ return (EACCES);
+ case MC_CMD_ERR_ENOENT:
+ return (ENOENT);
+ case MC_CMD_ERR_EINTR:
+ return (EINTR);
+ case MC_CMD_ERR_EACCES:
+ return (EACCES);
+ case MC_CMD_ERR_EBUSY:
+ return (EBUSY);
+ case MC_CMD_ERR_EINVAL:
+ return (EINVAL);
+ case MC_CMD_ERR_EDEADLK:
+ return (EDEADLK);
+ case MC_CMD_ERR_ENOSYS:
+ return (ENOTSUP);
+ case MC_CMD_ERR_ETIME:
+ return (ETIMEDOUT);
+ case MC_CMD_ERR_ENOTSUP:
+ return (ENOTSUP);
+ case MC_CMD_ERR_EALREADY:
+ return (EALREADY);
+
+ /* MCDI v2 */
+ case MC_CMD_ERR_EEXIST:
+ return (EEXIST);
+#ifdef MC_CMD_ERR_EAGAIN
+ case MC_CMD_ERR_EAGAIN:
+ return (EAGAIN);
+#endif
+#ifdef MC_CMD_ERR_ENOSPC
+ case MC_CMD_ERR_ENOSPC:
+ return (ENOSPC);
+#endif
+ case MC_CMD_ERR_ERANGE:
+ return (ERANGE);
+
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return (ENOMEM);
+ case MC_CMD_ERR_NO_VADAPTOR:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_EVB_PORT:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_VSWITCH:
+ return (ENODEV);
+ case MC_CMD_ERR_VLAN_LIMIT:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_PCI_FUNC:
+ return (ENODEV);
+ case MC_CMD_ERR_BAD_VLAN_MODE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VSWITCH_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VPORT_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_MAC_EXIST:
+ return (EEXIST);
+
+ case MC_CMD_ERR_PROXY_PENDING:
+ return (EAGAIN);
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, err);
+ return (EIO);
+ }
+}
+
+ void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_exception_t exception;
+
+ /* Reboot or Assertion failure only */
+ EFSYS_ASSERT(rc == EIO || rc == EINTR);
+
+ /*
+ * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
+ * then the EIO is not worthy of an exception.
+ */
+ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
+ return;
+
+ exception = (rc == EIO)
+ ? EFX_MCDI_EXCEPTION_MC_REBOOT
+ : EFX_MCDI_EXCEPTION_MC_BADASSERT;
+
+ emtp->emt_exception(emtp->emt_context, exception);
+}
+
+ void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_FALSE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_TRUE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
+ * when we're completing an aborted request.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ EFSYS_ASSERT(emip->emi_aborted > 0);
+ if (emip->emi_aborted > 0)
+ --emip->emi_aborted;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return;
+ }
+
+ emrp = emip->emi_pending_req;
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (emip->emi_max_version >= 2) {
+ /* MCDIv2 response details do not fit into an event. */
+ efx_mcdi_read_response_header(enp, emrp);
+ } else {
+ if (errcode != 0) {
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
+ int, errcode);
+ }
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = efx_mcdi_request_errcode(errcode);
+ } else {
+ emrp->emr_out_length_used = outlen;
+ emrp->emr_rc = 0;
+ }
+ }
+ if (errcode == 0) {
+ efx_mcdi_finish_response(enp, emrp);
+ }
+
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep)
+{
+ efx_rc_t rc;
+
+ /*
+ * Return proxy handle from MCDI request that returned with error
+	 * MC_CMD_ERR_PROXY_PENDING. This handle is used to wait for a matching
+ * PROXY_RESPONSE event.
+ */
+ if ((emrp == NULL) || (handlep == NULL)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((emrp->emr_rc != 0) &&
+ (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
+ *handlep = emrp->emr_proxy_handle;
+ rc = 0;
+ } else {
+ *handlep = 0;
+ rc = ENOENT;
+ }
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_rc_t rc;
+
+ /*
+ * Handle results of an authorization request for a privileged MCDI
+ * command. If authorization was granted then we must re-issue the
+ * original MCDI request. If authorization failed or timed out,
+ * then the original MCDI request should be completed with the
+ * result code from this event.
+ */
+ rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
+
+ emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
+}
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp = NULL;
+ boolean_t ev_cpl;
+ efsys_lock_state_t state;
+
+ /*
+ * The MCDI request (if there is one) has been terminated, either
+ * by a BADASSERT or REBOOT event.
+ *
+ * If there is an outstanding event-completed MCDI operation, then we
+ * will never receive the completion event (because both MCDI
+ * completions and BADASSERT events are sent to the same evq). So
+ * complete this MCDI op.
+ *
+ * This function might run in parallel with efx_mcdi_request_poll()
+	 * for poll-completed MCDI requests, and also with
+ * efx_mcdi_request_start() for post-watchdog completions.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ ev_cpl = emip->emi_ev_cpl;
+ if (emrp != NULL && emip->emi_ev_cpl) {
+ emip->emi_pending_req = NULL;
+
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = rc;
+ ++emip->emi_aborted;
+ }
+
+ /*
+ * Since we're running in parallel with a request, consume the
+ * status word before dropping the lock.
+ */
+ if (rc == EIO || rc == EINTR) {
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ (void) efx_mcdi_poll_reboot(enp);
+ emip->emi_new_epoch = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ if (emrp != NULL && ev_cpl)
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
+ MC_CMD_GET_VERSION_OUT_LEN),
+ MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
+ efx_word_t *ver_words;
+ uint16_t version[4];
+ uint32_t build;
+ efx_mcdi_boot_t status;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VERSION;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* bootrom support */
+ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+ goto version;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
+ version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
+ version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
+ version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
+ version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+version:
+ /* The bootrom doesn't understand BOOT_STATUS */
+ if (MC_FW_VERSION_IS_BOOTLOADER(build)) {
+ status = EFX_MCDI_BOOT_ROM;
+ goto out;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot access BOOT_STATUS */
+ status = EFX_MCDI_BOOT_PRIMARY;
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = 0;
+ goto out;
+ }
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
+ GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
+ status = EFX_MCDI_BOOT_PRIMARY;
+ else
+ status = EFX_MCDI_BOOT_SECONDARY;
+
+out:
+ if (versionp != NULL)
+ memcpy(versionp, version, sizeof (version));
+ if (buildp != NULL)
+ *buildp = build;
+ if (statusp != NULL)
+ *statusp = status;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
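+
+/*
+ * Example call (sketch): every output parameter is optional, so a caller
+ * that only needs the boot status can pass NULL for the others:
+ *
+ *	efx_mcdi_boot_t boot;
+ *
+ *	if (efx_mcdi_version(enp, NULL, NULL, &boot) == 0 &&
+ *	    boot == EFX_MCDI_BOOT_ROM) {
+ *		... the MC is still running its BootROM ...
+ *	}
+ */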
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ boolean_t v2_capable;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (flagsp != NULL)
+ *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (rx_dpcpu_fw_idp != NULL)
+ *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+
+ if (tx_dpcpu_fw_idp != NULL)
+ *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ v2_capable = B_FALSE;
+ else
+ v2_capable = B_TRUE;
+
+ if (flags2p != NULL) {
+ *flags2p = (v2_capable) ?
+ MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) :
+ 0;
+ }
+
+ if (tso2ncp != NULL) {
+ *tso2ncp = (v2_capable) ?
+ MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
+ 0;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_do_reboot(
+ __in efx_nic_t *enp,
+ __in boolean_t after_assertion)
+{
+ uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ /*
+	 * We could require the caller to have forced en_mod_flags to 0
+	 * before calling this function. That wouldn't help the other port,
+	 * though, which is about to have the MC ripped out from underneath
+	 * it. Since it has to cope with the subsequent fallout of MCDI
+	 * failures, we should as well.
+ */
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_REBOOT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
+ (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot reboot the MC. */
+ goto out;
+ }
+
+ /* A successful reboot request returns EIO. */
+ if (req.emr_rc != 0 && req.emr_rc != EIO) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+out:
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_TRUE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN)];
+ const char *reason;
+ unsigned int flags;
+ unsigned int index;
+ unsigned int ofst;
+ int retry;
+ efx_rc_t rc;
+
+ /*
+ * Before we attempt to chat to the MC, we should verify that the MC
+	 * isn't in its assertion handler, either due to a previous reboot,
+ * or because we're reinitializing due to an eec_exception().
+ *
+ * Use GET_ASSERTS to read any assertion state that may be present.
+ * Retry this command twice. Once because a boot-time assertion failure
+ * might cause the 1st MCDI request to fail. And once again because
+ * we might race with efx_mcdi_exit_assertion_handler() running on
+ * partner port(s) on the same NIC.
+ */
+ retry = 2;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_ASSERTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
+ efx_mcdi_execute_quiet(enp, &req);
+
+ } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
+
+ if (req.emr_rc != 0) {
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot clear assertions. */
+ goto out;
+ }
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /* Print out any assertion state recorded */
+ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return (0);
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
+ ? "illegal address trap"
+ : "unknown assertion";
+ EFSYS_PROBE3(mcpu_assertion,
+ const char *, reason, unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers (r1 ... r31) */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1;
+ index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++) {
+ EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
+ EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
+ EFX_DWORD_0));
+ ofst += sizeof (efx_dword_t);
+ }
+ EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Internal routines for specific MCDI requests.
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
+ MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_DRV_ATTACH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
+
+ /*
+ * Use DONT_CARE for the datapath firmware type to ensure that the
+ * driver can attach to an unprivileged function. The datapath firmware
+ * type to use is controlled by the 'sfboot' utility.
+ */
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ if (emip->emi_port == 1) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
+ } else if (emip->emi_port == 2) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ if (capabilitiesp != NULL) {
+ if (emip->emi_port == 1) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ } else if (emip->emi_port == 2) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail4;
+ }
+ }
+
+ if (board_typep != NULL) {
+ *board_typep = MCDI_OUT_DWORD(req,
+ GET_BOARD_CFG_OUT_BOARD_TYPE);
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (nevqp != NULL)
+ *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
+ if (nrxqp != NULL)
+ *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
+ if (ntxqp != NULL)
+ *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
+ MC_CMD_GET_PHY_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
+#if EFSYS_OPT_NAMES
+ (void) strncpy(encp->enc_phy_name,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
+ MIN(sizeof (encp->enc_phy_name) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+#endif /* EFSYS_OPT_NAMES */
+ (void) memset(encp->enc_phy_revision, 0,
+ sizeof (encp->enc_phy_revision));
+ memcpy(encp->enc_phy_revision,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
+ MIN(sizeof (encp->enc_phy_revision) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
+#if EFSYS_OPT_PHY_LED_CONTROL
+ encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
+ (1 << EFX_PHY_LED_OFF) |
+ (1 << EFX_PHY_LED_ON));
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ /* Get the media type of the fixed port, if recognised. */
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
+ epp->ep_fixed_port_type =
+ (efx_phy_media_type_t) MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
+ epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
+
+ epp->ep_phy_cap_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+#if EFSYS_OPT_PHY_FLAGS
+ encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
+
+ /* Populate internal state */
+ encp->enc_mcdi_mdio_channel =
+ (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
+
+#if EFSYS_OPT_PHY_STATS
+ encp->enc_mcdi_phy_stat_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+ encp->enc_bist_mask = 0;
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_LONG))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
+#endif /* EFSYS_OPT_BIST */
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported updates */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC changes */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported link control */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC spoofing */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_BIST
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/*
+ * Enter BIST offline mode. This is a firmware mode which puts the NIC into a
+ * state where memory BIST tests can be run without interference from normal
+ * operation. A reboot is required to exit this mode.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
+ MC_CMD_START_BIST_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_START_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
+
+ switch (type) {
+ case EFX_BIST_TYPE_PHY_NORMAL:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_SHORT:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_SHORT);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_LONG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_LONG);
+ break;
+ case EFX_BIST_TYPE_MC_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_MC_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_SAT_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PORT_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_REG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_REG_BIST);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+
+/* Enable logging of some events (e.g. link state changes) */
+ __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
+ MC_CMD_LOG_CTRL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LOG_CTRL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
+ MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef enum efx_stats_action_e {
+ EFX_STATS_CLEAR,
+ EFX_STATS_UPLOAD,
+ EFX_STATS_ENABLE_NOEVENTS,
+ EFX_STATS_ENABLE_EVENTS,
+ EFX_STATS_DISABLE,
+} efx_stats_action_t;
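+
+/*
+ * Mapping from actions to the MAC_STATS_IN flags populated below:
+ * EFX_STATS_CLEAR clears the MAC statistics, EFX_STATS_UPLOAD requests a
+ * one-shot DMA of the statistics buffer, EFX_STATS_ENABLE_NOEVENTS and
+ * EFX_STATS_ENABLE_EVENTS start periodic DMA (without or with completion
+ * events), and EFX_STATS_DISABLE stops periodic DMA.
+ */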
+
+static __checkReturn efx_rc_t
+efx_mcdi_mac_stats(
+ __in efx_nic_t *enp,
+ __in_opt efsys_mem_t *esmp,
+ __in efx_stats_action_t action,
+ __in uint16_t period_ms)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
+ MC_CMD_MAC_STATS_OUT_DMA_LEN)];
+ int clear = (action == EFX_STATS_CLEAR);
+ int upload = (action == EFX_STATS_UPLOAD);
+ int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
+ int events = (action == EFX_STATS_ENABLE_EVENTS);
+ int disable = (action == EFX_STATS_DISABLE);
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, upload,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
+ MAC_STATS_IN_PERIODIC_NOEVENT, !events,
+ MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
+
+ if (esmp != NULL) {
+ int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+
+ EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
+ EFX_MAC_STATS_SIZE);
+
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
+ } else {
+ EFSYS_ASSERT(!upload && !enable && !events);
+ }
+
+ /*
+ * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
+ * as this may fail (and leave periodic DMA enabled) if the
+ * vadapter has already been deleted.
+ */
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
+ (disable ? EVB_PORT_ID_NULL : enp->en_vport_id));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /* EF10: Expect ENOENT if no DMA queues are initialised */
+ if ((req.emr_rc != ENOENT) ||
+ (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ */
+ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ * Huntington uses a fixed 1sec period.
+ * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
+ */
+ if (period_ms == 0)
+ rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0);
+ else if (events)
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS,
+ period_ms);
+ else
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS,
+ period_ms);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/*
+ * This function returns the PF and VF numbers of a function. If it is a PF,
+ * the VF number is 0xffff. The VF number is the index of the VF on that
+ * PF. So if there are 3 VFs on PF 0, the VFs will return (pf=0, vf=0),
+ * (pf=0, vf=1), (pf=0, vf=2) and the PF will return (pf=0, vf=0xffff).
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+ MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
+ if (vfp != NULL)
+ *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
+ MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
+ MC_CMD_WORKAROUND_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WORKAROUND;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
+ req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_WORKAROUND_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (flagsp != NULL) {
+ if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flagsp = 0;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (implementedp != NULL) {
+ *implementedp =
+ MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+ }
+
+ if (enabledp != NULL) {
+ *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Size of media information page in accordance with SFF-8472 and SFF-8436.
+ * It is used in MCDI interface as well.
+ */
+#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_phy_media_info(
+ __in efx_nic_t *enp,
+ __in uint32_t mcdi_page,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length =
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ len);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * 2-wire device address of the base information in accordance with SFF-8472
+ * Diagnostic Monitoring Interface for Optical Transceivers section
+ * 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
+
+/*
+ * 2-wire device address of the digital diagnostics monitoring interface
+ * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
+ * Transceivers section 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
+
+/*
+ * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
+ * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
+ * Operation.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
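+
+/*
+ * Summary of the device address to MCDI page mapping used below:
+ *
+ *	SFP+ base (0xA0):	lower page 0 -> MCDI page 0, upper page 0 -> 1
+ *	SFP+ DDM (0xA2):	lower page 0 -> MCDI page 2, upper page 0 -> 3
+ *	QSFP+ (0xA0):		lower page 0 -> MCDI page -1, upper page 0 -> 0
+ */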
+
+ __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+ uint32_t mcdi_lower_page;
+ uint32_t mcdi_upper_page;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ /*
+ * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
+	 * The offset plus length interface allows access to page 0 only,
+	 * i.e. non-zero upper pages are not accessible.
+ * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
+ * QSFP+ Memory Map for details on how information is structured
+ * and accessible.
+ */
+ switch (epp->ep_fixed_port_type) {
+ case EFX_PHY_MEDIA_SFP_PLUS:
+ /*
+ * In accordance with SFF-8472 Diagnostic Monitoring
+ * Interface for Optical Transceivers section 4 Memory
+ * Organization two 2-wire addresses are defined.
+ */
+ switch (dev_addr) {
+ /* Base information */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
+ /*
+ * MCDI page 0 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA0.
+ */
+ mcdi_lower_page = 0;
+ /*
+ * MCDI page 1 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA0.
+ */
+ mcdi_upper_page = 1;
+ break;
+ /* Diagnostics */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
+ /*
+ * MCDI page 2 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA2.
+ */
+ mcdi_lower_page = 2;
+ /*
+ * MCDI page 3 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA2.
+ */
+ mcdi_upper_page = 3;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ case EFX_PHY_MEDIA_QSFP_PLUS:
+ switch (dev_addr) {
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
+ /*
+ * MCDI page -1 should be used to access lower page 0
+ * (0x00 - 0x7f).
+ */
+ mcdi_lower_page = (uint32_t)-1;
+ /*
+ * MCDI page 0 should be used to access upper page 0
+			 * (0x80 - 0xff).
+ */
+ mcdi_upper_page = 0;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ uint8_t read_len =
+ MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_lower_page, offset, read_len, data);
+ if (rc != 0)
+ goto fail2;
+
+ data += read_len;
+ len -= read_len;
+
+ offset = 0;
+ } else {
+ offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
+ }
+
+ if (len > 0) {
+ EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+ EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_upper_page, offset, len, data);
+ if (rc != 0)
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.h
new file mode 100644
index 00000000..21727713
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_mcdi.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_MCDI_H
+#define _SYS_EFX_MCDI_H
+
+#include "efx.h"
+#include "efx_regs_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 10ms for the status word to be set.
+ */
+#define EFX_MCDI_STATUS_SLEEP_US 10000
+
+struct efx_mcdi_req_s {
+ boolean_t emr_quiet;
+ /* Inputs: Command #, input buffer and length */
+ unsigned int emr_cmd;
+ uint8_t *emr_in_buf;
+ size_t emr_in_length;
+	/* Outputs: retcode, buffer, length, and length used */
+ efx_rc_t emr_rc;
+ uint8_t *emr_out_buf;
+ size_t emr_out_length;
+ size_t emr_out_length_used;
+ /* Internals: low level transport details */
+ unsigned int emr_err_code;
+ unsigned int emr_err_arg;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ uint32_t emr_proxy_handle;
+#endif
+};
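+
+/*
+ * Request lifecycle: the caller fills in emr_cmd and the input/output
+ * buffer pointers and lengths, then calls efx_mcdi_execute() (or the
+ * _quiet variant), which completes the request and fills in emr_rc and
+ * emr_out_length_used.
+ */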
+
+typedef struct efx_mcdi_iface_s {
+ unsigned int emi_port;
+ unsigned int emi_max_version;
+ unsigned int emi_seq;
+ efx_mcdi_req_t *emi_pending_req;
+ boolean_t emi_ev_cpl;
+ boolean_t emi_new_epoch;
+ int emi_aborted;
+ uint32_t emi_poll_cnt;
+ uint32_t emi_mc_reboot_status;
+} efx_mcdi_iface_t;
+
+extern void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode);
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+extern __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep);
+
+extern void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status);
+#endif
+
+extern void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err);
+
+extern void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc);
+
+typedef enum efx_mcdi_boot_e {
+ EFX_MCDI_BOOT_PRIMARY,
+ EFX_MCDI_BOOT_SECONDARY,
+ EFX_MCDI_BOOT_ROM,
+} efx_mcdi_boot_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+
+#if EFSYS_OPT_BIST
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+
+#if EFSYS_OPT_LOOPBACK
+extern __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
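+/*
+ * Request construction/parsing helpers. A minimal usage sketch (FOO is a
+ * placeholder, not a real MC_CMD; see the callers in efx_mcdi.c for real
+ * examples):
+ *
+ *	uint8_t payload[MAX(MC_CMD_FOO_IN_LEN, MC_CMD_FOO_OUT_LEN)];
+ *	efx_mcdi_req_t req;
+ *
+ *	(void) memset(payload, 0, sizeof (payload));
+ *	req.emr_cmd = MC_CMD_FOO;
+ *	req.emr_in_buf = payload;
+ *	req.emr_in_length = MC_CMD_FOO_IN_LEN;
+ *	req.emr_out_buf = payload;
+ *	req.emr_out_length = MC_CMD_FOO_OUT_LEN;
+ *	MCDI_IN_SET_DWORD(req, FOO_IN_SOME_FIELD, value);
+ *	efx_mcdi_execute(enp, &req);
+ *	value = MCDI_OUT_DWORD(req, FOO_OUT_SOME_FIELD);
+ */
+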
+#define MCDI_IN(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_in_buf + (_ofst)))
+
+#define MCDI_IN2(_emr, _type, _ofst) \
+ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \
+ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0, _value)
+
+#define MCDI_IN_SET_WORD(_emr, _ofst, _value) \
+ EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0, _value)
+
+#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0, _value)
+
+#define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \
+ EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field, _value)
+
+#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1)
+
+#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \
+ _field2, _value2) \
+ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2)
+
+#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3)
+
+#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4) \
+ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4)
+
+#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5) \
+ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5)
+
+#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6)
+
+#define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7) \
+ EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7)
+
+#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8) \
+ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8)
+
+#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9)
+
+#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9, _field10, _value10) \
+ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9, \
+ MC_CMD_ ## _field10, _value10)
+
+#define MCDI_OUT(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_out_buf + (_ofst)))
+
+#define MCDI_OUT2(_emr, _type, _ofst) \
+ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_OUT_BYTE(_emr, _ofst) \
+ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0)
+
+#define MCDI_OUT_WORD(_emr, _ofst) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0)
+
+#define MCDI_OUT_DWORD(_emr, _ofst) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0)
+
+#define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field)
+
+#define MCDI_EV_FIELD(_eqp, _field) \
+ EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field)
+
+#define MCDI_CMD_DWORD_FIELD(_edp, _field) \
+ EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field)
+
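+/*
+ * EFX_MCDI_HAVE_PRIVILEGE() checks that all bits of the named privilege
+ * group (MC_CMD_PRIVILEGE_MASK_IN_GRP_<priv>) are set in the supplied
+ * privilege mask.
+ */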
+#define EFX_MCDI_HAVE_PRIVILEGE(mask, priv) \
+ (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv))
+
+typedef enum efx_mcdi_feature_id_e {
+ EFX_MCDI_FEATURE_FW_UPDATE = 0,
+ EFX_MCDI_FEATURE_LINK_CONTROL,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE,
+ EFX_MCDI_FEATURE_MAC_SPOOFING,
+ EFX_MCDI_FEATURE_NIDS
+} efx_mcdi_feature_id_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_MCDI_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_mon.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_mon.c
new file mode 100644
index 00000000..c2f1e97e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_mon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_mon_name[] = {
+ "",
+ "sfx90x0",
+ "sfx91x0",
+ "sfx92x0"
+};
+
+ const char *
+efx_mon_name(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES);
+ return (__efx_mon_name[encp->enc_mon_type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_MON_MCDI
+static const efx_mon_ops_t __efx_mon_mcdi_ops = {
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+
+ __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_MON) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_MON;
+
+ emp->em_type = encp->enc_mon_type;
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ switch (emp->em_type) {
+#if EFSYS_OPT_MON_MCDI
+ case EFX_MON_SFC90X0:
+ case EFX_MON_SFC91X0:
+ case EFX_MON_SFC92X0:
+ emop = &__efx_mon_mcdi_ops;
+ break;
+#endif
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ emp->em_emop = emop;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_MON_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 5daa2a5725ba734b */
+static const char * const __mon_stat_name[] = {
+ "value_2_5v",
+ "value_vccp1",
+ "value_vcc",
+ "value_5v",
+ "value_12v",
+ "value_vccp2",
+ "value_ext_temp",
+ "value_int_temp",
+ "value_ain1",
+ "value_ain2",
+ "controller_cooling",
+ "ext_cooling",
+ "1v",
+ "1_2v",
+ "1_8v",
+ "3_3v",
+ "1_2va",
+ "vref",
+ "vaoe",
+ "aoe_temperature",
+ "psu_aoe_temperature",
+ "psu_temperature",
+ "fan0",
+ "fan1",
+ "fan2",
+ "fan3",
+ "fan4",
+ "vaoe_in",
+ "iaoe",
+ "iaoe_in",
+ "nic_power",
+ "0_9v",
+ "i0_9v",
+ "i1_2v",
+ "0_9v_adc",
+ "controller_temperature2",
+ "vreg_temperature",
+ "vreg_0_9v_temperature",
+ "vreg_1_2v_temperature",
+ "int_vptat",
+ "controller_internal_adc_temperature",
+ "ext_vptat",
+ "controller_external_adc_temperature",
+ "ambient_temperature",
+ "airflow",
+ "vdd08d_vss08d_csr",
+ "vdd08d_vss08d_csr_extadc",
+ "hotpoint_temperature",
+ "phy_power_switch_port0",
+ "phy_power_switch_port1",
+ "mum_vcc",
+ "0v9_a",
+ "i0v9_a",
+ "0v9_a_temp",
+ "0v9_b",
+ "i0v9_b",
+ "0v9_b_temp",
+ "ccom_avreg_1v2_supply",
+ "ccom_avreg_1v2_supply_ext_adc",
+ "ccom_avreg_1v8_supply",
+ "ccom_avreg_1v8_supply_ext_adc",
+ "controller_master_vptat",
+ "controller_master_internal_temp",
+ "controller_master_vptat_ext_adc",
+ "controller_master_internal_temp_ext_adc",
+ "controller_slave_vptat",
+ "controller_slave_internal_temp",
+ "controller_slave_vptat_ext_adc",
+ "controller_slave_internal_temp_ext_adc",
+ "sodimm_vout",
+ "sodimm_0_temp",
+ "sodimm_1_temp",
+ "phy0_vcc",
+ "phy1_vcc",
+ "controller_tdiode_temp",
+ "board_front_temp",
+ "board_back_temp",
+};
+
+/* END MKCONFIG GENERATED MonitorStatNamesBlock */
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
+ return (__mon_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop = emp->em_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ return (emop->emo_stats_update(enp, esmp, values));
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+ void
+efx_mon_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ emp->em_emop = NULL;
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_nic.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_nic.c
new file mode 100644
index 00000000..76caa744
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_nic.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp)
+{
+ if (venid == EFX_PCI_VENID_SFC) {
+ switch (devid) {
+#if EFSYS_OPT_SIENA
+ case EFX_PCI_DEVID_SIENA_F1_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Siena.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+
+ case EFX_PCI_DEVID_BETHPAGE:
+ case EFX_PCI_DEVID_SIENA:
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Huntington.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE:
+ case EFX_PCI_DEVID_GREENPORT:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE_VF:
+ case EFX_PCI_DEVID_GREENPORT_VF:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_PCI_DEVID_MEDFORD_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD_VF:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD */
+
+ case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
+ default:
+ break;
+ }
+ }
+
+ *efp = EFX_FAMILY_INVALID;
+ return (ENOTSUP);
+}
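+
+/*
+ * Typical bring-up sequence (sketch only): map the PCI IDs to a family with
+ * efx_family(), create the NIC object with efx_nic_create(), bring up MCDI
+ * (EFX_MOD_MCDI) and then call efx_nic_probe() (see the assertions in
+ * efx_nic_probe() below).
+ */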
+
+
+#define EFX_BIU_MAGIC0 0x01234567
+#define EFX_BIU_MAGIC1 0xfedcba98
+
+ __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ /*
+ * Write magic values to scratch registers 0 and 1, then
+ * verify that the values were written correctly. Interleave
+ * the accesses to ensure that the BIU is not just reading
+ * back the cached value that was last written.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ /*
+ * Perform the same test, with the values swapped. This
+ * ensures that subsequent tests don't start with the correct
+ * values already written into the scratch registers.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nic_ops_t __efx_nic_siena_ops = {
+ siena_nic_probe, /* eno_probe */
+ NULL, /* eno_board_cfg */
+ NULL, /* eno_set_drv_limits */
+ siena_nic_reset, /* eno_reset */
+ siena_nic_init, /* eno_init */
+ NULL, /* eno_get_vi_pool */
+ NULL, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ siena_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nic_fini, /* eno_fini */
+ siena_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static const efx_nic_ops_t __efx_nic_hunt_ops = {
+ ef10_nic_probe, /* eno_probe */
+ hunt_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static const efx_nic_ops_t __efx_nic_medford_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp)
+{
+ efx_nic_t *enp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
+ EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);
+
+ /* Allocate a NIC object */
+ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
+
+ if (enp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_magic = EFX_NIC_MAGIC;
+
+ switch (family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ enp->en_enop = &__efx_nic_siena_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LFSR_HASH_INSERT |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_LOOKAHEAD_SPLIT |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_TX_SRC_FILTERS;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ enp->en_enop = &__efx_nic_hunt_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ enp->en_enop = &__efx_nic_medford_ops;
+ /*
+ * FW_ASSISTED_TSO omitted as Medford only supports firmware
+ * assisted TSO version 2, not the v1 scheme used on Huntington.
+ */
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ enp->en_family = family;
+ enp->en_esip = esip;
+ enp->en_esbp = esbp;
+ enp->en_eslp = eslp;
+
+ *enpp = enp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+
+ enop = enp->en_enop;
+ if ((rc = enop->eno_probe(enp)) != 0)
+ goto fail1;
+
+ if ((rc = efx_phy_probe(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_PROBE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enop->eno_unprobe(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enop->eno_set_drv_limits != NULL) {
+ if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_bar_region == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = (enop->eno_get_bar_region)(enp,
+ region, offsetp, sizep)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_vi_pool != NULL) {
+ uint32_t vi_count = 0;
+
+ if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0)
+ goto fail1;
+
+ *evq_countp = vi_count;
+ *rxq_countp = vi_count;
+ *txq_countp = vi_count;
+ } else {
+ /* Use NIC limits as default value */
+ *evq_countp = encp->enc_evq_limit;
+ *rxq_countp = encp->enc_rxq_limit;
+ *txq_countp = encp->enc_txq_limit;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_NIC) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_NIC;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_nic_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ enop->eno_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_NIC;
+}
+
+ void
+efx_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ efx_phy_unprobe(enp);
+
+ enop->eno_unprobe(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_PROBE;
+}
+
+ void
+efx_nic_destroy(
+ __in efx_nic_t *enp)
+{
+ efsys_identifier_t *esip = enp->en_esip;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ enp->en_family = EFX_FAMILY_INVALID;
+ enp->en_esip = NULL;
+ enp->en_esbp = NULL;
+ enp->en_eslp = NULL;
+
+ enp->en_enop = NULL;
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ unsigned int mod_flags;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ /*
+ * All modules except the MCDI, PROBE, NVRAM, VPD, MON
+ * (which we do not reset here) must have been shut down or never
+ * initialized.
+ *
+	 * A rule of thumb here is: if the controller or MC reboots, is *any*
+	 * state lost? If it's lost and needs reapplying, then the module
+	 * *must* not be initialised during the reset.
+ */
+ mod_flags = enp->en_mod_flags;
+ mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
+ EFX_MOD_VPD | EFX_MOD_MON);
+ EFSYS_ASSERT3U(mod_flags, ==, 0);
+ if (mod_flags != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_reset(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ return (&(enp->en_nic_cfg));
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip)
+{
+ uint16_t mc_fw_version[4];
+ efx_rc_t rc;
+
+ if (enfip == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL);
+ if (rc != 0)
+ goto fail2;
+
+ rc = efx_mcdi_get_capabilities(enp, NULL,
+ &enfip->enfi_rx_dpcpu_fw_id,
+ &enfip->enfi_tx_dpcpu_fw_id,
+ NULL, NULL);
+ if (rc == 0) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_TRUE;
+ } else if (rc == ENOTSUP) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_FALSE;
+ enfip->enfi_rx_dpcpu_fw_id = 0;
+ enfip->enfi_tx_dpcpu_fw_id = 0;
+ } else {
+ goto fail3;
+ }
+
+ memcpy(enfip->enfi_mc_fw_version, mc_fw_version, sizeof(mc_fw_version));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+ if ((rc = enop->eno_register_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count)
+{
+ unsigned int bit;
+ efx_oword_t original;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ while (count > 0) {
+ /* This function is only suitable for registers */
+ EFSYS_ASSERT(rsp->rows == 1);
+
+ /* bit sweep on and off */
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+ for (bit = 0; bit < 128; bit++) {
+ /* Is this bit in the mask? */
+			if (~(rsp->mask.eo_u32[bit >> 5]) &
+			    (1U << (bit & 0x1f)))
+ continue;
+
+ /* Test this bit can be set in isolation */
+ reg = original;
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFX_SET_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ /* Test this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, rsp->mask);
+ EFX_CLEAR_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail2;
+ }
+ }
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+
+ --count;
+ ++rsp;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count)
+{
+ efx_sram_pattern_fn_t func;
+ unsigned int index;
+ unsigned int address;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[pattern];
+
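+	/*
+	 * Fill each table with the generated test pattern, then read it back
+	 * row by row and compare under the row mask.
+	 */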
+ while (count > 0) {
+ /* Write */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
+
+ address += rsp->step;
+ }
+
+ /* Read */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ address += rsp->step;
+ }
+
+ ++rsp;
+ --count;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_LOOPBACK
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp)
+{
+ efx_qword_t mask;
+
+ EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
+ EFSYS_ASSERT(maskp != NULL);
+
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR ==
+ EFX_LOOPBACK_XAUI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR ==
+ EFX_LOOPBACK_XAUI_WS_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR ==
+ EFX_LOOPBACK_XFI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS ==
+ EFX_LOOPBACK_PMA_INT_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS ==
+ EFX_LOOPBACK_SD_FEP2_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS ==
+ EFX_LOOPBACK_SD_FEP1_5_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS);
+
+ /* Build bitmask of possible loopback types */
+ EFX_ZERO_QWORD(mask);
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "MAC" grouping has historically been used by drivers to
+ * mean loopbacks supported by on-chip hardware. Keep that
+ * meaning here, and include on-chip PHY layer loopbacks.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "PHY" grouping has historically been used by drivers to
+ * mean loopbacks supported by off-chip hardware. Keep that
+ * meaning here.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD);
+ }
+
+ *maskp = mask;
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
+ MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)];
+ efx_qword_t mask;
+ efx_qword_t modes;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /*
+ * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
+ * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link().
+ */
+ efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask);
+
+ EFX_AND_QWORD(mask,
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED));
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_1000FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_10000FDX] = modes;
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
+ /* Response includes 40G loopback modes */
+ modes =
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
+ }
+
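+	/*
+	 * The EFX_LINK_UNKNOWN entry is the union of the per-speed loopback
+	 * masks, plus the OFF mode.
+	 */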
+ EFX_ZERO_QWORD(modes);
+ EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
+ encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t lane_bandwidth;
+ uint32_t total_bandwidth;
+ efx_rc_t rc;
+
+ if ((pcie_link_width == 0) || (pcie_link_width > 16) ||
+ !ISP2(pcie_link_width)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (pcie_link_gen) {
+ case EFX_PCIE_LINK_SPEED_GEN1:
+ /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 2000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN2:
+ /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 4000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN3:
+ /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */
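+		/* i.e. 8000 Mb/s * 128/130 ~= 7877 Mb/s usable per lane */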
+ lane_bandwidth = 7877;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ total_bandwidth = lane_bandwidth * pcie_link_width;
+ *bandwidth_mbpsp = total_bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t bandwidth;
+ efx_pcie_link_performance_t result;
+ efx_rc_t rc;
+
+ if ((encp->enc_required_pcie_bandwidth_mbps == 0) ||
+ (pcie_link_width == 0) || (pcie_link_width == 32) ||
+ (pcie_link_gen == 0)) {
+ /*
+ * No usable info on what is required and/or in use. In virtual
+ * machines, sometimes the PCIe link width is reported as 0 or
+ * 32, or the speed as 0.
+ */
+ result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH;
+ goto out;
+ }
+
+ /* Calculate the available bandwidth in megabits per second */
+ rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width,
+ pcie_link_gen, &bandwidth);
+ if (rc != 0)
+ goto fail1;
+
+ if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) {
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH;
+ } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) {
+ /* The link provides enough bandwidth but not optimal latency */
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY;
+ } else {
+ result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL;
+ }
+
+out:
+ *resultp = result;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_nvram.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_nvram.c
new file mode 100644
index 00000000..6ee2a71d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_nvram.c
@@ -0,0 +1,1044 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nvram_ops_t __efx_nvram_siena_ops = {
+#if EFSYS_OPT_DIAG
+ siena_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nvram_type_to_partn, /* envo_type_to_partn */
+ siena_nvram_partn_size, /* envo_partn_size */
+ siena_nvram_partn_rw_start, /* envo_partn_rw_start */
+ siena_nvram_partn_read, /* envo_partn_read */
+ siena_nvram_partn_erase, /* envo_partn_erase */
+ siena_nvram_partn_write, /* envo_partn_write */
+ siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ siena_nvram_partn_get_version, /* envo_partn_get_version */
+ siena_nvram_partn_set_version, /* envo_partn_set_version */
+	NULL,			/* envo_buffer_validate */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
+#if EFSYS_OPT_DIAG
+ ef10_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nvram_type_to_partn, /* envo_type_to_partn */
+ ef10_nvram_partn_size, /* envo_partn_size */
+ ef10_nvram_partn_rw_start, /* envo_partn_rw_start */
+ ef10_nvram_partn_read, /* envo_partn_read */
+ ef10_nvram_partn_erase, /* envo_partn_erase */
+ ef10_nvram_partn_write, /* envo_partn_write */
+ ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ ef10_nvram_partn_get_version, /* envo_partn_get_version */
+ ef10_nvram_partn_set_version, /* envo_partn_set_version */
+ ef10_nvram_buffer_validate, /* envo_buffer_validate */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ envop = &__efx_nvram_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_envop = envop;
+ enp->en_mod_flags |= EFX_MOD_NVRAM;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, sizep)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ *sizep = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_get_version(enp, partn,
+ subtypep, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *chunk_sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_rw_start(enp, partn, chunk_sizep)) != 0)
+ goto fail2;
+
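+	/* Record which partition type holds the NVRAM read/write lock */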
+ enp->en_nvram_locked = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ unsigned int offset = 0;
+ size_t size = 0;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, &size)) != 0)
+ goto fail2;
+
+ if ((rc = envop->envo_partn_erase(enp, partn, offset, size)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_write(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_rw_finish(enp, partn)) != 0)
+ goto fail2;
+
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
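+	/* Drop the lock even though the finish operation failed */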
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ /*
+ * The Siena implementation of envo_set_version() will attempt to
+ * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector.
+	 * Therefore the caller must not already hold the NVRAM_UPDATE lock.
+ */
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_set_version(enp, partn, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
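+	/* Buffer validation is optional; e.g. the Siena ops leave it NULL */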
+	if (envop->envo_buffer_validate != NULL &&
+ ((rc = envop->envo_buffer_validate(enp, partn,
+ partn_data, partn_size)) != 0))
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+void
+efx_nvram_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ enp->en_envop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_NVRAM;
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+/*
+ * Internal MCDI request handling
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN,
+ MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)];
+ unsigned int npartn;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_PARTITIONS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ npartn = MCDI_OUT_DWORD(req, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(npartn)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (size < npartn * sizeof (uint32_t)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ *npartnp = npartn;
+
+ memcpy(data,
+ MCDI_OUT2(req, uint32_t, NVRAM_PARTITIONS_OUT_TYPE_ID),
+ (npartn * sizeof (uint32_t)));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_METADATA;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_METADATA_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_SUBTYPE_VALID)) {
+ *subtypep = MCDI_OUT_DWORD(req, NVRAM_METADATA_OUT_SUBTYPE);
+ } else {
+ *subtypep = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_VERSION_VALID)) {
+ version[0] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_W);
+ version[1] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_X);
+ version[2] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Y);
+ version[3] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Z);
+ } else {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_DESCRIPTION_VALID)) {
+		/* Return the optional description string */
+ if ((descp != NULL) && (size > 0)) {
+ size_t desclen;
+
+ descp[0] = '\0';
+ desclen = (req.emr_out_length_used
+ - MC_CMD_NVRAM_METADATA_OUT_LEN(0));
+
+ EFSYS_ASSERT3U(desclen, <=,
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM);
+
+			if (size < (desclen + 1)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ memcpy(descp, MCDI_OUT2(req, char,
+ NVRAM_METADATA_OUT_DESCRIPTION),
+ desclen);
+
+ /* Ensure string is NUL terminated */
+ descp[desclen] = '\0';
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_INFO_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (sizep)
+ *sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE);
+
+ if (addressp)
+ *addressp = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_PHYSADDR);
+
+ if (erase_sizep)
+ *erase_sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_ERASESIZE);
+
+ if (write_sizep) {
+ *write_sizep =
+ (req.emr_out_length_used <
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN) ?
+ 0 : MCDI_OUT_DWORD(req, NVRAM_INFO_V2_OUT_WRITESIZE);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * MC_CMD_NVRAM_UPDATE_START_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_START_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_START_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_V2_IN_TYPE, partn);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_START_V2_IN_FLAGS,
+ NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_V2_LEN,
+ MC_CMD_NVRAM_READ_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_READ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_READ_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_LENGTH, size);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_READ_OUT_LEN(size)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER),
+ size);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN,
+ MC_CMD_NVRAM_ERASE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_ERASE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_ERASE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The NVRAM_WRITE MCDI command is a V1 command and so is supported by both
+ * Siena and EF10 based boards. However, EF10 based boards support the use
+ * of this command with payloads up to the maximum MCDI V2 payload length.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+	__in_bcount(size)	caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MCDI_CTL_SDU_LEN_MAX_V1,
+ MCDI_CTL_SDU_LEN_MAX_V2)];
+ efx_rc_t rc;
+ size_t max_data_size;
+
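+	/*
+	 * The data that fits in one NVRAM_WRITE request is the MCDI payload
+	 * limit minus the fixed request header (type, offset and length).
+	 */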
+ max_data_size = enp->en_nic_cfg.enc_mcdi_max_payload_length
+ - MC_CMD_NVRAM_WRITE_IN_LEN(0);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_mcdi_max_payload_length, >, 0);
+ EFSYS_ASSERT3U(max_data_size, <,
+ enp->en_nic_cfg.enc_mcdi_max_payload_length);
+
+ if (size > max_data_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_WRITE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_WRITE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, size);
+
+ memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER),
+ data, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *resultp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)];
+ uint32_t result = 0; /* FIXME: use MC_CMD_NVRAM_VERIFY_RC_UNKNOWN */
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_REBOOT, reboot);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (encp->enc_fw_verified_nvram_update_required == B_FALSE) {
+		/* Report success if verified updates are not required. */
+ result = MC_CMD_NVRAM_VERIFY_RC_SUCCESS;
+ } else {
+ /* Firmware-verified NVRAM updates are required */
+ if (req.emr_out_length_used <
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ result =
+ MCDI_OUT_DWORD(req, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+
+ if (result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) {
+ /* Mandatory verification failed */
+ rc = EINVAL;
+ goto fail3;
+ }
+ }
+
+ if (resultp != NULL)
+ *resultp = result;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Always report verification result */
+ if (resultp != NULL)
+ *resultp = result;
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN)];
+ int result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TEST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT);
+ if (result == MC_CMD_NVRAM_TEST_FAIL) {
+
+ EFSYS_PROBE1(nvram_test_failure, int, partn);
+
+ rc = (EINVAL);
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_phy.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_phy.c
new file mode 100644
index 00000000..752cd52e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_phy.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+static const efx_phy_ops_t __efx_phy_siena_ops = {
+ siena_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ siena_phy_reconfigure, /* epo_reconfigure */
+ siena_phy_verify, /* epo_verify */
+ siena_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ siena_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ NULL, /* epo_bist_enable_offline */
+ siena_phy_bist_start, /* epo_bist_start */
+ siena_phy_bist_poll, /* epo_bist_poll */
+ siena_phy_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_phy_ops_t __efx_phy_ef10_ops = {
+ ef10_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ ef10_phy_reconfigure, /* epo_reconfigure */
+ ef10_phy_verify, /* epo_verify */
+ ef10_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ ef10_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ ef10_bist_enable_offline, /* epo_bist_enable_offline */
+ ef10_bist_start, /* epo_bist_start */
+ ef10_bist_poll, /* epo_bist_poll */
+ ef10_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_phy_ops_t *epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_port = encp->enc_port;
+ epp->ep_phy_type = encp->enc_phy_type;
+
+ /* Hook in operations structure */
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ epop = &__efx_phy_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ epp->ep_epop = epop;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_verify(enp));
+}
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+ __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode)
+{
+ efx_nic_cfg_t *encp = (&enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_phy_led_mode == mode)
+ goto done;
+
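+	/* The default mode is always allowed, plus what the NIC advertises */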
+ mask = (1 << EFX_PHY_LED_DEFAULT);
+ mask |= encp->enc_led_mask;
+
+ if (!((1 << mode) & mask)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES);
+ epp->ep_phy_led_mode = mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ switch (flag) {
+ case EFX_PHY_CAP_CURRENT:
+ *maskp = epp->ep_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_DEFAULT:
+ *maskp = epp->ep_default_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_PERM:
+ *maskp = epp->ep_phy_cap_mask;
+ break;
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+ __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t old_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((mask & ~epp->ep_phy_cap_mask) != 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_adv_cap_mask == mask)
+ goto done;
+
+ old_mask = epp->ep_adv_cap_mask;
+ epp->ep_adv_cap_mask = mask;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_adv_cap_mask = old_mask;
+ /* Reconfigure for robustness */
+ if (epop->epo_reconfigure(enp) != 0) {
+ /*
+ * We may have an inconsistent view of our advertised speed
+ * capabilities.
+ */
+ EFSYS_ASSERT(0);
+ }
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ *maskp = epp->ep_lp_cap_mask;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_oui_get(enp, ouip));
+}
+
+ void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID)
+ *typep = epp->ep_module_type;
+ else
+ *typep = epp->ep_fixed_port_type;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(data != NULL);
+
+ if ((uint32_t)offset + len > 0xff) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_phy_module_get_info(enp, dev_addr,
+ offset, len, data)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED PhyStatNamesBlock af9ffa24da3bc100 */
+static const char * const __efx_phy_stat_name[] = {
+ "oui",
+ "pma_pmd_link_up",
+ "pma_pmd_rx_fault",
+ "pma_pmd_tx_fault",
+ "pma_pmd_rev_a",
+ "pma_pmd_rev_b",
+ "pma_pmd_rev_c",
+ "pma_pmd_rev_d",
+ "pcs_link_up",
+ "pcs_rx_fault",
+ "pcs_tx_fault",
+ "pcs_ber",
+ "pcs_block_errors",
+ "phy_xs_link_up",
+ "phy_xs_rx_fault",
+ "phy_xs_tx_fault",
+ "phy_xs_align",
+ "phy_xs_sync_a",
+ "phy_xs_sync_b",
+ "phy_xs_sync_c",
+ "phy_xs_sync_d",
+ "an_link_up",
+ "an_master",
+ "an_local_rx_ok",
+ "an_remote_rx_ok",
+ "cl22ext_link_up",
+ "snr_a",
+ "snr_b",
+ "snr_c",
+ "snr_d",
+ "pma_pmd_signal_a",
+ "pma_pmd_signal_b",
+ "pma_pmd_signal_c",
+ "pma_pmd_signal_d",
+ "an_complete",
+ "pma_pmd_rev_major",
+ "pma_pmd_rev_minor",
+ "pma_pmd_rev_micro",
+ "pcs_fw_version_0",
+ "pcs_fw_version_1",
+ "pcs_fw_version_2",
+ "pcs_fw_version_3",
+ "pcs_fw_build_yy",
+ "pcs_fw_build_mm",
+ "pcs_fw_build_dd",
+ "pcs_op_mode",
+};
+
+/* END MKCONFIG GENERATED PhyStatNamesBlock */
+
+ const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t type)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS);
+
+ return (__efx_phy_stat_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_stats_update(enp, esmp, stat));
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ if (epop->epo_bist_enable_offline == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_enable_offline(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_BIST_TYPE_UNKNOWN);
+
+ if (epop->epo_bist_start == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_start(enp, type)) != 0)
+ goto fail2;
+
+ epp->ep_current_bist = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_poll != NULL);
+ if (epop->epo_bist_poll == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp,
+ valuesp, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_stop != NULL);
+
+ if (epop->epo_bist_stop != NULL)
+ epop->epo_bist_stop(enp, type);
+
+ epp->ep_current_bist = EFX_BIST_TYPE_UNKNOWN;
+}
+
+#endif /* EFSYS_OPT_BIST */
+ void
+efx_phy_unprobe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_epop = NULL;
+
+ epp->ep_adv_cap_mask = 0;
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_phy_ids.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_phy_ids.h
new file mode 100644
index 00000000..9d9a0f90
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_phy_ids.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_PHY_IDS_H
+#define _SYS_EFX_PHY_IDS_H
+
+#define EFX_PHY_NULL 0
+
+typedef enum efx_phy_type_e { /* GENERATED BY scripts/genfwdef */
+ EFX_PHY_TXC43128 = 1,
+ EFX_PHY_SFX7101 = 3,
+ EFX_PHY_QT2022C2 = 4,
+ EFX_PHY_PM8358 = 6,
+ EFX_PHY_SFT9001A = 8,
+ EFX_PHY_QT2025C = 9,
+ EFX_PHY_SFT9001B = 10,
+ EFX_PHY_QLX111V = 12,
+ EFX_PHY_QT2025_KR = 17,
+ EFX_PHY_AEL3020 = 18,
+ EFX_PHY_XFI_FARMI = 19,
+} efx_phy_type_t;
+
+
+#endif /* _SYS_EFX_PHY_IDS_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_port.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_port.c
new file mode 100644
index 00000000..518c2a22
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_port.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_PORT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_PORT;
+
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_link_mode = EFX_LINK_UNKNOWN;
+ epp->ep_mac_drain = B_TRUE;
+
+ /* Configure the MAC */
+ if ((rc = efx_mac_select(enp)) != 0)
+ goto fail1;
+
+ epp->ep_emop->emo_reconfigure(enp);
+
+	/* Pick up the current PHY capabilities */
+ efx_port_poll(enp, NULL);
+
+ /*
+ * Turn on the PHY if available, otherwise reset it, and
+ * reconfigure it with the current configuration.
+ */
+ if (epop->epo_power != NULL) {
+ if ((rc = epop->epo_power(enp, B_TRUE)) != 0)
+ goto fail2;
+ } else {
+ if ((rc = epop->epo_reset(enp)) != 0)
+ goto fail2;
+ }
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY);
+ enp->en_reset_flags &= ~EFX_RESET_PHY;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_link_mode_t ignore_link_mode;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+ EFSYS_ASSERT(!epp->ep_mac_stats_pending);
+
+ if (link_modep == NULL)
+ link_modep = &ignore_link_mode;
+
+ if ((rc = emop->emo_poll(enp, link_modep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode],
+ loopback_type) == 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_loopback_type == loopback_type &&
+ epp->ep_loopback_link_mode == link_mode)
+ return (0);
+
+ if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
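+
+/*
+ * A caller would typically check enc_loopback_types[] in efx_nic_cfg_t for
+ * a supported mode first, mirroring the check inside efx_port_loopback_set();
+ * an illustrative sketch, assuming the EFX_LINK_ and EFX_LOOPBACK_
+ * enumerators declared in efx.h:
+ *
+ *	if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[EFX_LINK_10000FDX],
+ *	    EFX_LOOPBACK_XGMII) != 0)
+ *		rc = efx_port_loopback_set(enp, EFX_LINK_10000FDX,
+ *		    EFX_LOOPBACK_XGMII);
+ */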
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_loopback_type_name[] = {
+ "OFF",
+ "DATA",
+ "GMAC",
+ "XGMII",
+ "XGXS",
+ "XAUI",
+ "GMII",
+ "SGMII",
+ "XGBR",
+ "XFI",
+ "XAUI_FAR",
+ "GMII_FAR",
+ "SGMII_FAR",
+ "XFI_FAR",
+ "GPHY",
+ "PHY_XS",
+ "PCS",
+ "PMA_PMD",
+ "XPORT",
+ "XGMII_WS",
+ "XAUI_WS",
+ "XAUI_WS_FAR",
+ "XAUI_WS_NEAR",
+ "GMII_WS",
+ "XFI_WS",
+ "XFI_WS_FAR",
+ "PHYXS_WS",
+ "PMA_INT",
+ "SD_NEAR",
+ "SD_FAR",
+ "PMA_INT_WS",
+ "SD_FEP2_WS",
+ "SD_FEP1_5_WS",
+ "SD_FEP_WS",
+ "SD_FES_WS",
+};
+
+ __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type)
+{
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) ==
+ EFX_LOOPBACK_NTYPES);
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES);
+
+ return (__efx_loopback_type_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ void
+efx_port_fini(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(epp->ep_mac_drain);
+
+ epp->ep_emop = NULL;
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_mac_drain = B_FALSE;
+
+ /* Turn off the PHY */
+ if (epop->epo_power != NULL)
+ (void) epop->epo_power(enp, B_FALSE);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_regs.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs.h
new file mode 100644
index 00000000..a1a7f9da
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs.h
@@ -0,0 +1,3870 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_H
+#define _SYS_EFX_REGS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
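+/*
+ * Naming convention (inferred from the efx headers; not normative):
+ * FR_*_OFST macros give register offsets, FRF_*_LBN/_WIDTH give a field's
+ * lowest bit number and width in bits, and FFE_* values are field
+ * enumerations.  The two letters after the prefix name the first and last
+ * hardware revisions a definition applies to (A = Falcon A, B = Falcon B,
+ * C = Siena, Z = all later revisions), so definitions for disjoint revision
+ * ranges (e.g. FRF_AB_* and FRF_CZ_*) may legitimately share a bit position.
+ * Fields are normally accessed via the efx_types.h accessors, e.g.
+ * EFX_OWORD_FIELD(oword, FRF_AB_STRAP_PINS).
+ */
+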
+/*
+ * FR_AB_EE_VPD_CFG0_REG_SF(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_SF_OFST 0x00000300
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_EE_VPD_CFG0_REG(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_OFST 0x00000140
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG_SF(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_SF_OFST 0x00000320
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_OFST 0x00000320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+
+/*
+ * FR_AB_PCIE_SD_CTL45_REG_SF(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_SF_OFST 0x00000330
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL45_REG(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_OFST 0x00000330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG_SF(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_SF_OFST 0x00000340
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_OFST 0x00000340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+
+/*
+ * FR_AB_HW_INIT_REG_SF(128bit):
+ * Hardware initialization register
+ */
+#define FR_AB_HW_INIT_REG_SF_OFST 0x00000350
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_HW_INIT_REG(128bit):
+ * Hardware initialization register
+ */
+#define FR_AZ_HW_INIT_REG_OFST 0x000000c0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AZ_TRGT_MASK_ALL_LBN 100
+#define FRF_AZ_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AZ_FC_BLOCKING_EN_LBN 45
+#define FRF_AZ_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_B2B_REQ_EN_LBN 44
+#define FRF_AZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+
+/*
+ * FR_AB_NIC_STAT_REG_SF(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_SF_OFST 0x00000360
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_NIC_STAT_REG(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_OFST 0x00000200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+
+/*
+ * FR_AB_GLB_CTL_REG_SF(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_SF_OFST 0x00000370
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_GLB_CTL_REG(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_OFST 0x00000220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+
+/*
+ * FR_AZ_IOM_IND_ADR_REG(32bit):
+ * IO-mapped indirect access address register
+ */
+#define FR_AZ_IOM_IND_ADR_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_LBN 24
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_WIDTH 1
+#define FRF_AZ_IOM_IND_ADR_LBN 0
+#define FRF_AZ_IOM_IND_ADR_WIDTH 24
+
+
+/*
+ * FR_AZ_IOM_IND_DAT_REG(32bit):
+ * IO-mapped indirect access data register
+ */
+#define FR_AZ_IOM_IND_DAT_REG_OFST 0x00000004
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_IND_DAT_LBN 0
+#define FRF_AZ_IOM_IND_DAT_WIDTH 32
+
+
+/*
+ * FR_AZ_ADR_REGION_REG(128bit):
+ * Address region register
+ */
+#define FR_AZ_ADR_REGION_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+
+/*
+ * FR_AZ_INT_EN_REG_KER(128bit):
+ * Kernel driver Interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_KER_OFST 0x00000010
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_EN_REG_CHAR(128bit):
+ * Char Driver interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_CHAR_OFST 0x00000020
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_CHAR_INT_CHAR_LBN 4
+#define FRF_AZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_AZ_CHAR_INT_KER_LBN 3
+#define FRF_AZ_CHAR_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_AZ_DRV_INT_EN_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_ADR_REG_KER(128bit):
+ * Interrupt host address for Kernel driver
+ */
+#define FR_AZ_INT_ADR_REG_KER_OFST 0x00000030
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+#define FRF_AZ_INT_ADR_KER_DW0_LBN 0
+#define FRF_AZ_INT_ADR_KER_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_KER_DW1_LBN 32
+#define FRF_AZ_INT_ADR_KER_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_INT_ADR_REG_CHAR(128bit):
+ * Interrupt host address for Char driver
+ */
+#define FR_AZ_INT_ADR_REG_CHAR_OFST 0x00000040
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_AZ_INT_ADR_CHAR_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_WIDTH 64
+#define FRF_AZ_INT_ADR_CHAR_DW0_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_LBN 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_KER(32bit):
+ * Kernel interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_KER_OFST 0x00000050
+/* falcona0=net_func_bar2 */
+
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+
+/*
+ * FR_BZ_INT_ISR0_REG(128bit):
+ * Function 0 Interrupt Acknowledge Status register
+ */
+#define FR_BZ_INT_ISR0_REG_OFST 0x00000090
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+#define FRF_BZ_INT_ISR_REG_DW0_LBN 0
+#define FRF_BZ_INT_ISR_REG_DW0_WIDTH 32
+#define FRF_BZ_INT_ISR_REG_DW1_LBN 32
+#define FRF_BZ_INT_ISR_REG_DW1_WIDTH 32
+
+
+/*
+ * FR_AB_EE_SPI_HCMD_REG(128bit):
+ * SPI host command register
+ */
+#define FR_AB_EE_SPI_HCMD_REG_OFST 0x00000100
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+
+/*
+ * FR_CZ_USR_EV_CFG(32bit):
+ * User Level Event Configuration register
+ */
+#define FR_CZ_USR_EV_CFG_OFST 0x00000100
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+
+/*
+ * FR_AB_EE_SPI_HADR_REG(128bit):
+ * SPI host address register
+ */
+#define FR_AB_EE_SPI_HADR_REG_OFST 0x00000110
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+
+/*
+ * FR_AB_EE_SPI_HDATA_REG(128bit):
+ * SPI host data register
+ */
+#define FR_AB_EE_SPI_HDATA_REG_OFST 0x00000120
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+
+/*
+ * FR_AB_EE_BASE_PAGE_REG(128bit):
+ * Expansion ROM base mirror register
+ */
+#define FR_AB_EE_BASE_PAGE_REG_OFST 0x00000130
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+
+/*
+ * FR_AB_EE_VPD_SW_CNTL_REG(128bit):
+ * VPD access SW control register
+ */
+#define FR_AB_EE_VPD_SW_CNTL_REG_OFST 0x00000150
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+
+/*
+ * FR_AB_EE_VPD_SW_DATA_REG(128bit):
+ * VPD access SW data register
+ */
+#define FR_AB_EE_VPD_SW_DATA_REG_OFST 0x00000160
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+
+/*
+ * FR_BB_PCIE_CORE_INDIRECT_REG(64bit):
+ * Indirect Access to PCIE Core registers
+ */
+#define FR_BB_PCIE_CORE_INDIRECT_REG_OFST 0x000001f0
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+
+/*
+ * FR_AB_GPIO_CTL_REG(128bit):
+ * GPIO control register
+ */
+#define FR_AB_GPIO_CTL_REG_OFST 0x00000210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_BB_CLK156_OUT_EN_LBN 31
+#define FRF_BB_CLK156_OUT_EN_WIDTH 1
+#define FRF_BB_USE_NIC_CLK_LBN 30
+#define FRF_BB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_KER(128bit):
+ * Fatal interrupt register for Kernel
+ */
+#define FR_AZ_FATAL_INTR_REG_KER_OFST 0x00000230
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_CHAR(128bit):
+ * Fatal interrupt register for Char
+ */
+#define FR_AZ_FATAL_INTR_REG_CHAR_OFST 0x00000240
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_AZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_AZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_DP_CTRL_REG(128bit):
+ * Datapath control register
+ */
+#define FR_AZ_DP_CTRL_REG_OFST 0x00000250
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_FLS_EVQ_ID_LBN 0
+#define FRF_AZ_FLS_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_MEM_STAT_REG(128bit):
+ * Memory status register
+ */
+#define FR_AZ_MEM_STAT_REG_OFST 0x00000260
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 40
+#define FRF_AB_MEM_PERR_VEC_DW0_LBN 53
+#define FRF_AB_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_AB_MEM_PERR_VEC_DW1_LBN 85
+#define FRF_AB_MEM_PERR_VEC_DW1_WIDTH 6
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_AB_MBIST_ERR_DW0_LBN 0
+#define FRF_AB_MBIST_ERR_DW0_WIDTH 32
+#define FRF_AB_MBIST_ERR_DW1_LBN 32
+#define FRF_AB_MBIST_ERR_DW1_WIDTH 6
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+#define FRF_CZ_MEM_PERR_VEC_DW0_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_LBN 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_WIDTH 3
+
+
+/*
+ * FR_PORT0_CS_DEBUG_REG(128bit):
+ * Debug register
+ */
+#define FR_AZ_CS_DEBUG_REG_OFST 0x00000270
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_RESERVED_LBN 36
+#define FRF_CZ_CS_RESERVED_WIDTH 4
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_DW0_LBN 1
+#define FRF_CZ_CS_PORT_FPE_DW0_WIDTH 32
+#define FRF_CZ_CS_PORT_FPE_DW1_LBN 33
+#define FRF_CZ_CS_PORT_FPE_DW1_WIDTH 3
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_DRIVER_REG(128bit):
+ * Driver scratch register [0-7]
+ */
+#define FR_AZ_DRIVER_REG_OFST 0x00000280
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_DRIVER_REG_STEP 16
+#define FR_AZ_DRIVER_REG_ROWS 8
+
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
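+/*
+ * For registers with _STEP/_ROWS variants (such as FR_AZ_DRIVER_REG above),
+ * entry <n> of the array is assumed to live at OFST + (n * STEP) for
+ * 0 <= n < ROWS; e.g. driver scratch register 3 would sit at
+ * FR_AZ_DRIVER_REG_OFST + (3 * FR_AZ_DRIVER_REG_STEP) = 0x2b0.
+ */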
+
+/*
+ * FR_AZ_ALTERA_BUILD_REG(128bit):
+ * Altera build register
+ */
+#define FR_AZ_ALTERA_BUILD_REG_OFST 0x00000300
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+
+/*
+ * FR_AZ_CSR_SPARE_REG(128bit):
+ * Spare register
+ */
+#define FR_AZ_CSR_SPARE_REG_OFST 0x00000310
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_MEM_PERR_EN_LBN 64
+#define FRF_AZ_MEM_PERR_EN_WIDTH 38
+#define FRF_AZ_MEM_PERR_EN_DW0_LBN 64
+#define FRF_AZ_MEM_PERR_EN_DW0_WIDTH 32
+#define FRF_AZ_MEM_PERR_EN_DW1_LBN 96
+#define FRF_AZ_MEM_PERR_EN_DW1_WIDTH 6
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+
+/*
+ * FR_BZ_DEBUG_DATA_OUT_REG(128bit):
+ * Live Debug and Debug 2 out ports
+ */
+#define FR_BZ_DEBUG_DATA_OUT_REG_OFST 0x00000350
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_DEBUG2_PORT_LBN 25
+#define FRF_BZ_DEBUG2_PORT_WIDTH 15
+#define FRF_BZ_DEBUG1_PORT_LBN 0
+#define FRF_BZ_DEBUG1_PORT_WIDTH 25
+
+
+/*
+ * FR_BZ_EVQ_RPTR_REGP0(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BZ_EVQ_RPTR_REGP0_OFST 0x00000400
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_EVQ_RPTR_REGP0_STEP 8192
+#define FR_BZ_EVQ_RPTR_REGP0_ROWS 1024
+/*
+ * FR_AA_EVQ_RPTR_REG_KER(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AA_EVQ_RPTR_REG_KER_OFST 0x00011b00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_RPTR_REG_KER_STEP 4
+#define FR_AA_EVQ_RPTR_REG_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_RPTR_REG(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AZ_EVQ_RPTR_REG_OFST 0x00fa0000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_RPTR_REG_STEP 16
+#define FR_AB_EVQ_RPTR_REG_ROWS 4096
+#define FR_CZ_EVQ_RPTR_REG_ROWS 1024
+/*
+ * FR_BB_EVQ_RPTR_REGP123(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BB_EVQ_RPTR_REGP123_OFST 0x01000400
+/* falconb0=net_func_bar2 */
+#define FR_BB_EVQ_RPTR_REGP123_STEP 8192
+#define FR_BB_EVQ_RPTR_REGP123_ROWS 3072
+
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * FR_BZ_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_BZ_TIMER_COMMAND_REGP0_OFST 0x00000420
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_REGP0_ROWS 1024
+/*
+ * FR_AA_TIMER_COMMAND_REG_KER(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REG_KER_OFST 0x00000420
+/* falcona0=net_func_bar2 */
+#define FR_AA_TIMER_COMMAND_REG_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_REG_KER_ROWS 4
+/*
+ * FR_AB_TIMER_COMMAND_REGP123(128bit):
+ * Timer Command Registers
+ */
+#define FR_AB_TIMER_COMMAND_REGP123_OFST 0x01000420
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TIMER_COMMAND_REGP123_STEP 8192
+#define FR_AB_TIMER_COMMAND_REGP123_ROWS 3072
+/*
+ * FR_AA_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REGP0_OFST 0x00008420
+/* falcona0=char_func_bar0 */
+#define FR_AA_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_AA_TIMER_COMMAND_REGP0_ROWS 1020
+
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_AZ_DRV_EV_REG(128bit):
+ * Driver generated event register
+ */
+#define FR_AZ_DRV_EV_REG_OFST 0x00000440
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+#define FRF_AZ_DRV_EV_DATA_DW0_LBN 0
+#define FRF_AZ_DRV_EV_DATA_DW0_WIDTH 32
+#define FRF_AZ_DRV_EV_DATA_DW1_LBN 32
+#define FRF_AZ_DRV_EV_DATA_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_EVQ_CTL_REG(128bit):
+ * Event queue control register
+ */
+#define FR_AZ_EVQ_CTL_REG_OFST 0x00000450
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+
+/*
+ * FR_AZ_EVQ_CNT1_REG(128bit):
+ * Event counter 1 register
+ */
+#define FR_AZ_EVQ_CNT1_REG_OFST 0x00000460
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_AZ_EVQ_CNT2_REG(128bit):
+ * Event counter 2 register
+ */
+#define FR_AZ_EVQ_CNT2_REG_OFST 0x00000470
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_CZ_USR_EV_REG(32bit):
+ * Event mailbox register
+ */
+#define FR_CZ_USR_EV_REG_OFST 0x00000540
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_USR_EV_REG_STEP 8192
+#define FR_CZ_USR_EV_REG_ROWS 1024
+
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+
+/*
+ * FR_AZ_BUF_TBL_CFG_REG(128bit):
+ * Buffer table configuration register
+ */
+#define FR_AZ_BUF_TBL_CFG_REG_OFST 0x00000600
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+
+/*
+ * FR_AZ_SRM_RX_DC_CFG_REG(128bit):
+ * SRAM receive descriptor cache configuration register
+ */
+#define FR_AZ_SRM_RX_DC_CFG_REG_OFST 0x00000610
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_TX_DC_CFG_REG(128bit):
+ * SRAM transmit descriptor cache configuration register
+ */
+#define FR_AZ_SRM_TX_DC_CFG_REG_OFST 0x00000620
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_SF_OFST 0x00000380
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_OFST 0x00000630
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+
+/*
+ * FR_AZ_BUF_TBL_UPD_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_BUF_TBL_UPD_REG_OFST 0x00000650
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+
+/*
+ * FR_AZ_SRM_UPD_EVQ_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_SRM_UPD_EVQ_REG_OFST 0x00000660
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_SRAM_PARITY_REG(128bit):
+ * SRAM parity register.
+ */
+#define FR_AZ_SRAM_PARITY_REG_OFST 0x00000670
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_CFG_REG(128bit):
+ * Receive configuration register
+ */
+#define FR_AZ_RX_CFG_REG_OFST 0x00000800
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_FILTER_CTL_REG(128bit):
+ * Receive filter control registers
+ */
+#define FR_AZ_RX_FILTER_CTL_REG_OFST 0x00000810
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_NUM_KER_LBN 24
+#define FRF_AZ_NUM_KER_WIDTH 2
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+
+/*
+ * FR_AZ_RX_FLUSH_DESCQ_REG(128bit):
+ * Receive flush descriptor queue register
+ */
+#define FR_AZ_RX_FLUSH_DESCQ_REG_OFST 0x00000820
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_BZ_RX_DESC_UPD_REGP0_OFST 0x00000830
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_RX_DESC_UPD_REG_KER(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REG_KER_OFST 0x00000830
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_REG_KER_ROWS 4
+/*
+ * FR_AB_RX_DESC_UPD_REGP123(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AB_RX_DESC_UPD_REGP123_OFST 0x01000830
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_RX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REGP0_OFST 0x00008830
+/* falcona0=char_func_bar0 */
+#define FR_AA_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_RX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+#define FRF_AZ_RX_DESC_DW0_LBN 0
+#define FRF_AZ_RX_DESC_DW0_WIDTH 32
+#define FRF_AZ_RX_DESC_DW1_LBN 32
+#define FRF_AZ_RX_DESC_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_DC_CFG_REG(128bit):
+ * Receive descriptor cache configuration register
+ */
+#define FR_AZ_RX_DC_CFG_REG_OFST 0x00000840
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_MAX_PF_LBN 2
+#define FRF_AZ_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+
+/*
+ * FR_AZ_RX_DC_PF_WM_REG(128bit):
+ * Receive descriptor cache pre-fetch watermark register
+ */
+#define FR_AZ_RX_DC_PF_WM_REG_OFST 0x00000850
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+
+/*
+ * FR_BZ_RX_RSS_TKEY_REG(128bit):
+ * RSS Toeplitz hash key
+ */
+#define FR_BZ_RX_RSS_TKEY_REG_OFST 0x00000860
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_RX_RSS_TKEY_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW3_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_DW3_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW2_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_DW2_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_LBN 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW0_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_NODESC_DROP_REG(128bit):
+ * Receive dropped packet counter register
+ */
+#define FR_AZ_RX_NODESC_DROP_REG_OFST 0x00000880
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AZ_RX_NODESC_DROP_CNT_WIDTH 16
+
+
+/*
+ * FR_AZ_RX_SELF_RST_REG(128bit):
+ * Receive self reset register
+ */
+#define FR_AZ_RX_SELF_RST_REG_OFST 0x00000890
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_ISCSI_DIS_LBN 17
+#define FRF_AZ_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AB_RX_SW_RST_REG_LBN 16
+#define FRF_AB_RX_SW_RST_REG_WIDTH 1
+#define FRF_AB_RX_SELF_RST_EN_LBN 8
+#define FRF_AB_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AZ_RX_MAX_PF_LAT_LBN 4
+#define FRF_AZ_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AZ_RX_MAX_LU_LAT_LBN 0
+#define FRF_AZ_RX_MAX_LU_LAT_WIDTH 4
+
+
+/*
+ * FR_AZ_RX_DEBUG_REG(128bit):
+ * undocumented register
+ */
+#define FR_AZ_RX_DEBUG_REG_OFST 0x000008a0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+#define FRF_AZ_RX_DEBUG_DW0_LBN 0
+#define FRF_AZ_RX_DEBUG_DW0_WIDTH 32
+#define FRF_AZ_RX_DEBUG_DW1_LBN 32
+#define FRF_AZ_RX_DEBUG_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_PUSH_DROP_REG(128bit):
+ * Receive descriptor push dropped counter register
+ */
+#define FR_AZ_RX_PUSH_DROP_REG_OFST 0x000008b0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG1(128bit):
+ * IPv6 RSS Toeplitz hash key low bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG1_OFST 0x000008d0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG2(128bit):
+ * IPv6 RSS Toeplitz hash key middle bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG2_OFST 0x000008e0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG3(128bit):
+ * IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings
+ */
+#define FR_CZ_RX_RSS_IPV6_REG3_OFST 0x000008f0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_FLUSH_DESCQ_REG(128bit):
+ * Transmit flush descriptor queue register
+ */
+#define FR_AZ_TX_FLUSH_DESCQ_REG_OFST 0x00000a00
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_BZ_TX_DESC_UPD_REGP0_OFST 0x00000a10
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_TX_DESC_UPD_REG_KER(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REG_KER_OFST 0x00000a10
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_REG_KER_ROWS 8
+/*
+ * FR_AB_TX_DESC_UPD_REGP123(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AB_TX_DESC_UPD_REGP123_OFST 0x01000a10
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_TX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REGP0_OFST 0x00008a10
+/* falcona0=char_func_bar0 */
+#define FR_AA_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_TX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+#define FRF_AZ_TX_DESC_DW0_LBN 0
+#define FRF_AZ_TX_DESC_DW0_WIDTH 32
+#define FRF_AZ_TX_DESC_DW1_LBN 32
+#define FRF_AZ_TX_DESC_DW1_WIDTH 32
+#define FRF_AZ_TX_DESC_DW2_LBN 64
+#define FRF_AZ_TX_DESC_DW2_WIDTH 31
+
+
+/*
+ * FR_AZ_TX_DC_CFG_REG(128bit):
+ * Transmit descriptor cache configuration register
+ */
+#define FR_AZ_TX_DC_CFG_REG_OFST 0x00000a20
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+
+/*
+ * FR_AA_TX_CHKSM_CFG_REG(128bit):
+ * Transmit checksum configuration register
+ */
+#define FR_AA_TX_CHKSM_CFG_REG_OFST 0x00000a30
+/* falcona0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_CFG_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_CFG_REG_OFST 0x00000a50
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_TX_PUSH_DROP_REG(128bit):
+ * Transmit push dropped register
+ */
+#define FR_AZ_TX_PUSH_DROP_REG_OFST 0x00000a60
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_RESERVED_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_RESERVED_REG_OFST 0x00000a80
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+
+/*
+ * FR_BZ_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_BZ_TX_PACE_REG_OFST 0x00000a90
+/* falconb0,sienaa0=net_func_bar2 */
+/*
+ * FR_AA_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_AA_TX_PACE_REG_OFST 0x00f80000
+/* falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_AZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_SB_AF_LBN 9
+#define FRF_AZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_AZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_AZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_AZ_TX_PACE_BIN_TH_WIDTH 5
+
+
+/*
+ * FR_AZ_TX_PACE_DROP_QID_REG(128bit):
+ * PACE Drop QID Counter
+ */
+#define FR_AZ_TX_PACE_DROP_QID_REG_OFST 0x00000aa0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_TX_VLAN_REG(128bit):
+ * Transmit VLAN tag register
+ */
+#define FR_AB_TX_VLAN_REG_OFST 0x00000ae0
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_TX_VLAN_EN_LBN 127
+#define FRF_AB_TX_VLAN_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_AB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_AB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_LBN 112
+#define FRF_AB_TX_VLAN7_WIDTH 12
+#define FRF_AB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_AB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_AB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_LBN 96
+#define FRF_AB_TX_VLAN6_WIDTH 12
+#define FRF_AB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_AB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_AB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_LBN 80
+#define FRF_AB_TX_VLAN5_WIDTH 12
+#define FRF_AB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_AB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_AB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_LBN 64
+#define FRF_AB_TX_VLAN4_WIDTH 12
+#define FRF_AB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_AB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_AB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_LBN 48
+#define FRF_AB_TX_VLAN3_WIDTH 12
+#define FRF_AB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_AB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_AB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_LBN 32
+#define FRF_AB_TX_VLAN2_WIDTH 12
+#define FRF_AB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_AB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_AB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_LBN 16
+#define FRF_AB_TX_VLAN1_WIDTH 12
+#define FRF_AB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_AB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_AB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_LBN 0
+#define FRF_AB_TX_VLAN0_WIDTH 12
+
+
+/*
+ * FR_AZ_TX_IPFIL_PORTEN_REG(128bit):
+ * Transmit filter control register
+ */
+#define FR_AZ_TX_IPFIL_PORTEN_REG_OFST 0x00000af0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_AZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_AB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_AB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_AB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_AB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_AB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_AB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_AB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_AB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_AB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_AB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_AB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_AB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_AB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_AB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_AB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_AB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_AB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_AB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_AB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_AB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_AB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_AB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_AB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_AB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_AB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_AB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_AB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_AB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_AB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_AB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_AB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_AB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+
+/*
+ * FR_AB_TX_IPFIL_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_AB_TX_IPFIL_TBL_OFST 0x00000b00
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_IPFIL_TBL_STEP 16
+#define FR_AB_TX_IPFIL_TBL_ROWS 16
+
+#define FRF_AB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_AB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_AB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_AB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_AB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_AB_TX_IP_SRC_ADR_0_WIDTH 32
+
+
+/*
+ * FR_AB_MD_TXD_REG(128bit):
+ * PHY management transmit data register
+ */
+#define FR_AB_MD_TXD_REG_OFST 0x00000c00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_RXD_REG(128bit):
+ * PHY management receive data register
+ */
+#define FR_AB_MD_RXD_REG_OFST 0x00000c10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_CS_REG(128bit):
+ * PHY management configuration & status register
+ */
+#define FR_AB_MD_CS_REG_OFST 0x00000c20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RD_EN_LBN 15
+#define FRF_AB_MD_RD_EN_WIDTH 1
+#define FRF_AB_MD_WR_EN_LBN 14
+#define FRF_AB_MD_WR_EN_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+
+/*
+ * FR_AB_MD_PHY_ADR_REG(128bit):
+ * PHY management PHY address register
+ */
+#define FR_AB_MD_PHY_ADR_REG_OFST 0x00000c30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+
+/*
+ * FR_AB_MD_ID_REG(128bit):
+ * PHY management ID register
+ */
+#define FR_AB_MD_ID_REG_OFST 0x00000c40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+
+/*
+ * FR_AB_MD_STAT_REG(128bit):
+ * PHY management status & mask register
+ */
+#define FR_AB_MD_STAT_REG_OFST 0x00000c50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_STAT_DMA_REG(128bit):
+ * Port MAC statistical counter DMA register
+ */
+#define FR_AB_MAC_STAT_DMA_REG_OFST 0x00000c60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_WIDTH 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_LBN 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH 16
+
+
+/*
+ * FR_AB_MAC_CTRL_REG(128bit):
+ * Port MAC control register
+ */
+#define FR_AB_MAC_CTRL_REG_OFST 0x00000c80
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FRF_AB_MAC_SPEED_10M 0
+#define FRF_AB_MAC_SPEED_100M 1
+#define FRF_AB_MAC_SPEED_1G 2
+#define FRF_AB_MAC_SPEED_10G 3
+
+
+/*
+ * FR_BB_GEN_MODE_REG(128bit):
+ * General Purpose mode register (external interrupt mask)
+ */
+#define FR_BB_GEN_MODE_REG_OFST 0x00000c90
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_MC_HASH0_REG(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH0_REG_OFST 0x00000ca0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH0_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH0_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH0_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_MAC_MC_HASH1_REG(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH1_REG_OFST 0x00000cb0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH1_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH1_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH1_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_GM_CFG1_REG(32bit):
+ * GMAC configuration register 1
+ */
+#define FR_AB_GM_CFG1_REG_OFST 0x00000e00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_GM_CFG2_REG(32bit):
+ * GMAC configuration register 2
+ */
+#define FR_AB_GM_CFG2_REG_OFST 0x00000e10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FRF_AB_GM_IF_MODE_BYTE_MODE 2
+#define FRF_AB_GM_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+
+/*
+ * FR_AB_GM_IPG_REG(32bit):
+ * GMAC IPG register
+ */
+#define FR_AB_GM_IPG_REG_OFST 0x00000e20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+
+/*
+ * FR_AB_GM_HD_REG(32bit):
+ * GMAC half duplex register
+ */
+#define FR_AB_GM_HD_REG_OFST 0x00000e30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+
+/*
+ * FR_AB_GM_MAX_FLEN_REG(32bit):
+ * GMAC maximum frame length register
+ */
+#define FR_AB_GM_MAX_FLEN_REG_OFST 0x00000e40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+
+/*
+ * FR_AB_GM_TEST_REG(32bit):
+ * GMAC test register
+ */
+#define FR_AB_GM_TEST_REG_OFST 0x00000e70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+
+/*
+ * FR_AB_GM_ADR1_REG(32bit):
+ * GMAC station address register 1
+ */
+#define FR_AB_GM_ADR1_REG_OFST 0x00000f00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+
+/*
+ * FR_AB_GM_ADR2_REG(32bit):
+ * GMAC station address register 2
+ */
+#define FR_AB_GM_ADR2_REG_OFST 0x00000f10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+
+/*
+ * FR_AB_GMF_CFG0_REG(32bit):
+ * GMAC FIFO configuration register 0
+ */
+#define FR_AB_GMF_CFG0_REG_OFST 0x00000f20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+
+/*
+ * FR_AB_GMF_CFG1_REG(32bit):
+ * GMAC FIFO configuration register 1
+ */
+#define FR_AB_GMF_CFG1_REG_OFST 0x00000f30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+
+/*
+ * FR_AB_GMF_CFG2_REG(32bit):
+ * GMAC FIFO configuration register 2
+ */
+#define FR_AB_GMF_CFG2_REG_OFST 0x00000f40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG3_REG(32bit):
+ * GMAC FIFO configuration register 3
+ */
+#define FR_AB_GMF_CFG3_REG_OFST 0x00000f50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG4_REG(32bit):
+ * GMAC FIFO configuration register 4
+ */
+#define FR_AB_GMF_CFG4_REG_OFST 0x00000f60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+
+/*
+ * FR_AB_GMF_CFG5_REG(32bit):
+ * GMAC FIFO configuration register 5
+ */
+#define FR_AB_GMF_CFG5_REG_OFST 0x00000f70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+
+/*
+ * FR_BB_TX_SRC_MAC_TBL(128bit):
+ * Transmit MAC source address filter table
+ */
+#define FR_BB_TX_SRC_MAC_TBL_OFST 0x00001000
+/* falconb0=net_func_bar2 */
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_LBN 96
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_WIDTH 16
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_LBN 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_WIDTH 16
+
+
+/*
+ * FR_BB_TX_SRC_MAC_CTL_REG(128bit):
+ * Transmit MAC source address filter control
+ */
+#define FR_BB_TX_SRC_MAC_CTL_REG_OFST 0x00001100
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+
+/*
+ * FR_AB_XM_ADR_LO_REG(128bit):
+ * XGMAC address register low
+ */
+#define FR_AB_XM_ADR_LO_REG_OFST 0x00001200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+
+/*
+ * FR_AB_XM_ADR_HI_REG(128bit):
+ * XGMAC address register high
+ */
+#define FR_AB_XM_ADR_HI_REG_OFST 0x00001210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+
+/*
+ * FR_AB_XM_GLB_CFG_REG(128bit):
+ * XGMAC global configuration
+ */
+#define FR_AB_XM_GLB_CFG_REG_OFST 0x00001220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_TX_CFG_REG(128bit):
+ * XGMAC transmit configuration
+ */
+#define FR_AB_XM_TX_CFG_REG_OFST 0x00001230
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_RX_CFG_REG(128bit):
+ * XGMAC receive configuration
+ */
+#define FR_AB_XM_RX_CFG_REG_OFST 0x00001240
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_MGT_INT_MASK(128bit):
+ * XGMAC management interrupt mask
+ */
+#define FR_AB_XM_MGT_INT_MASK_OFST 0x00001250
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XM_FC_REG(128bit):
+ * XGMAC flow control register
+ */
+#define FR_AB_XM_FC_REG_OFST 0x00001270
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+
+/*
+ * FR_AB_XM_PAUSE_TIME_REG(128bit):
+ * XGMAC pause time register
+ */
+#define FR_AB_XM_PAUSE_TIME_REG_OFST 0x00001290
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_XM_TX_PARAM_REG(128bit):
+ * XGMAC transmit parameter register
+ */
+#define FR_AB_XM_TX_PARAM_REG_OFST 0x000012d0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+
+/*
+ * FR_AB_XM_RX_PARAM_REG(128bit):
+ * XGMAC receive parameter register
+ */
+#define FR_AB_XM_RX_PARAM_REG_OFST 0x000012e0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+
+/*
+ * FR_AB_XM_MGT_INT_REG(128bit):
+ * XGMAC management interrupt mask register
+ */
+#define FR_AB_XM_MGT_INT_REG_OFST 0x000012f0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PWR_RST_REG(128bit):
+ * XGXS/XAUI powerdown/reset register
+ */
+#define FR_AB_XX_PWR_RST_REG_OFST 0x00001300
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_SD_CTL_REG(128bit):
+ * XGXS/XAUI powerdown/reset control register
+ */
+#define FR_AB_XX_SD_CTL_REG_OFST 0x00001310
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+
+/*
+ * FR_AB_XX_TXDRV_CTL_REG(128bit):
+ * XAUI SerDes transmit drive control register
+ */
+#define FR_AB_XX_TXDRV_CTL_REG_OFST 0x00001320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+
+/*
+ * FR_AB_XX_PRBS_CTL_REG(128bit):
+ * XAUI SerDes PRBS generator/checker control register
+ */
+#define FR_AB_XX_PRBS_CTL_REG_OFST 0x00001330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_CHK_REG(128bit):
+ * XAUI SerDes PRBS checker control/status register
+ */
+#define FR_AB_XX_PRBS_CHK_REG_OFST 0x00001340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_ERR_REG(128bit):
+ * XAUI SerDes PRBS error count register
+ */
+#define FR_AB_XX_PRBS_ERR_REG_OFST 0x00001350
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+
+/*
+ * FR_AB_XX_CORE_STAT_REG(128bit):
+ * XAUI XGXS core status register
+ */
+#define FR_AB_XX_CORE_STAT_REG_OFST 0x00001360
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+
+/*
+ * FR_AA_RX_DESC_PTR_TBL_KER(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AA_RX_DESC_PTR_TBL_KER_OFST 0x00011800
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_RX_DESC_PTR_TBL(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AZ_RX_DESC_PTR_TBL_OFST 0x00f40000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FR_AB_RX_DESC_PTR_TBL_ROWS 4096
+
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AZ_RX_RESET_LBN 89
+#define FRF_AZ_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+
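+/*
+ * Editor's note, an illustrative sketch that is not part of the upstream
+ * header: register arrays such as the descriptor pointer table above are
+ * described by an _OFST base address, a per-row _STEP and a per-chip _ROWS
+ * count, so row n lives at OFST + n * STEP.  example_rx_desc_ptr_ofst is a
+ * hypothetical helper name used only for documentation.
+ */
+#if 0	/* documentation only, never compiled */
+static inline unsigned int
+example_rx_desc_ptr_ofst(unsigned int queue)
+{
+	/*
+	 * Caller must keep queue below FR_AB_RX_DESC_PTR_TBL_ROWS
+	 * (Falcon) or FR_CZ_RX_DESC_PTR_TBL_ROWS (Siena).
+	 */
+	return (FR_AZ_RX_DESC_PTR_TBL_OFST +
+	    queue * FR_AZ_RX_DESC_PTR_TBL_STEP);
+}
+#endif
+
+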
+/*
+ * FR_AA_TX_DESC_PTR_TBL_KER(128bit):
+ * Transmit descriptor pointer
+ */
+#define FR_AA_TX_DESC_PTR_TBL_KER_OFST 0x00011900
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/*
+ * FR_AZ_TX_DESC_PTR_TBL(128bit):
+ * Transmit descriptor pointer
+ */
+#define FR_AZ_TX_DESC_PTR_TBL_OFST 0x00f50000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_AB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+
+/*
+ * FR_AA_EVQ_PTR_TBL_KER(128bit):
+ * Event queue pointer table
+ */
+#define FR_AA_EVQ_PTR_TBL_KER_OFST 0x00011a00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_PTR_TBL(128bit):
+ * Event queue pointer table
+ */
+#define FR_AZ_EVQ_PTR_TBL_OFST 0x00f60000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_AB_EVQ_PTR_TBL_ROWS 4096
+
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+
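+/*
+ * Editor's note, an illustrative sketch that is not part of the upstream
+ * header: the FFE_AZ_EVQ_SIZE_* encodings above follow the pattern
+ * "entries = 512 << code".  A driver picking the code for a queue of
+ * n entries (a power of two between 512 and 32768) could do so as below;
+ * example_evq_size_code is a hypothetical helper name.
+ */
+#if 0	/* documentation only, never compiled */
+static inline unsigned int
+example_evq_size_code(unsigned int entries)
+{
+	unsigned int code = 0;
+
+	while ((512u << code) < entries)
+		code++;
+	return (code);	/* value written to the FRF_AZ_EVQ_SIZE field */
+}
+#endif
+
+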
+/*
+ * FR_AA_BUF_HALF_TBL_KER(64bit):
+ * Buffer table in half buffer table mode, direct access by driver
+ */
+#define FR_AA_BUF_HALF_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_HALF_TBL(64bit):
+ * Buffer table in half buffer table mode, direct access by driver
+ */
+#define FR_AZ_BUF_HALF_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_AB_BUF_HALF_TBL_ROWS 524288
+
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+
+/*
+ * FR_AA_BUF_FULL_TBL_KER(64bit):
+ * Buffer table in full buffer table mode direct access by driver
+ */
+#define FR_AA_BUF_FULL_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_FULL_TBL(64bit):
+ * Buffer table in full buffer table mode, direct access by driver
+ */
+#define FR_AZ_BUF_FULL_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_FULL_TBL_STEP 8
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_AB_BUF_FULL_TBL_ROWS 917504
+
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_ADR_FBUF_DW0_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_DW0_WIDTH 32
+#define FRF_AZ_BUF_ADR_FBUF_DW1_LBN 46
+#define FRF_AZ_BUF_ADR_FBUF_DW1_WIDTH 2
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+
+/*
+ * FR_AZ_RX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AZ_RX_FILTER_TBL0_OFST 0x00f00000
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_FILTER_TBL0_STEP 32
+#define FR_AZ_RX_FILTER_TBL0_ROWS 8192
+/*
+ * FR_AB_RX_FILTER_TBL1(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AB_RX_FILTER_TBL1_OFST 0x00f00010
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_FILTER_TBL1_STEP 32
+#define FR_AB_RX_FILTER_TBL1_ROWS 8192
+
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_AZ_TCP_UDP_LBN 108
+#define FRF_AZ_TCP_UDP_WIDTH 1
+#define FRF_AZ_RXQ_ID_LBN 96
+#define FRF_AZ_RXQ_ID_WIDTH 12
+#define FRF_AZ_DEST_IP_LBN 64
+#define FRF_AZ_DEST_IP_WIDTH 32
+#define FRF_AZ_DEST_PORT_TCP_LBN 48
+#define FRF_AZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_AZ_SRC_IP_LBN 16
+#define FRF_AZ_SRC_IP_WIDTH 32
+#define FRF_AZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_AZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_RX_MAC_FILTER_TBL0(128bit):
+ * Receive Ethernet filter table
+ */
+#define FR_CZ_RX_MAC_FILTER_TBL0_OFST 0x00f00010
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_DEST_MAC_DW0_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_DW0_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_DW1_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_DW1_WIDTH 16
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_TIMER_TBL(128bit):
+ * Timer table
+ */
+#define FR_AZ_TIMER_TBL_OFST 0x00f70000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_AB_TIMER_TBL_ROWS 4096
+
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_AB_TIMER_MODE_LBN 12
+#define FRF_AB_TIMER_MODE_WIDTH 2
+#define FFE_AB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_AB_TIMER_MODE_TRIG_START 2
+#define FFE_AB_TIMER_MODE_IMMED_START 1
+#define FFE_AB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_AB_TIMER_VAL_LBN 0
+#define FRF_AB_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_BZ_TX_PACE_TBL_OFST 0x00f80000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2 */
+#define FR_AZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+/*
+ * FR_AA_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_AA_TX_PACE_TBL_OFST 0x00f80040
+/* falcona0=char_func_bar0 */
+/* FR_AZ_TX_PACE_TBL_STEP 16 */
+#define FR_AA_TX_PACE_TBL_ROWS 4092
+
+#define FRF_AZ_TX_PACE_LBN 0
+#define FRF_AZ_TX_PACE_WIDTH 5
+
+
+/*
+ * FR_BZ_RX_INDIRECTION_TBL(7bit):
+ * RX Indirection Table
+ */
+#define FR_BZ_RX_INDIRECTION_TBL_OFST 0x00fb0000
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+
+/*
+ * FR_CZ_TX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Transmit filter table
+ */
+#define FR_CZ_TX_FILTER_TBL0_OFST 0x00fc0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_TX_MAC_FILTER_TBL0(128bit):
+ * Transmit Ethernet filter table
+ */
+#define FR_CZ_TX_MAC_FILTER_TBL0_OFST 0x00fe0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_SRC_MAC_DW0_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_DW0_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_DW1_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_DW1_WIDTH 16
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_CZ_MC_TREG_SMEM(32bit):
+ * MC Shared Memory
+ */
+#define FR_CZ_MC_TREG_SMEM_OFST 0x00ff0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_BB_MSIX_VECTOR_TABLE_OFST 0x00ff0000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/*
+ * FR_CZ_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_CZ_MSIX_VECTOR_TABLE_OFST 0x00000000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_BB_MSIX_PBA_TABLE_OFST 0x00ff2000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/*
+ * FR_CZ_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_CZ_MSIX_PBA_TABLE_OFST 0x00008000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+
+/*
+ * FR_AZ_SRM_DBG_REG(64bit):
+ * SRAM debug access
+ */
+#define FR_AZ_SRM_DBG_REG_OFST 0x03000000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_SRM_DBG_REG_STEP 8
+
+#define FR_CZ_SRM_DBG_REG_ROWS 262144
+#define FR_AB_SRM_DBG_REG_ROWS 2097152
+
+#define FRF_AZ_SRM_DBG_LBN 0
+#define FRF_AZ_SRM_DBG_WIDTH 64
+#define FRF_AZ_SRM_DBG_DW0_LBN 0
+#define FRF_AZ_SRM_DBG_DW0_WIDTH 32
+#define FRF_AZ_SRM_DBG_DW1_LBN 32
+#define FRF_AZ_SRM_DBG_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_CHAR(32bit):
+ * CHAR interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_CHAR_OFST 0x00000060
+/* falcona0=char_func_bar0 */
+
+#define FRF_AA_INT_ACK_CHAR_FIELD_LBN 0
+#define FRF_AA_INT_ACK_CHAR_FIELD_WIDTH 32
+
+
+/* FS_DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_AZ_TX_DSC_ERROR_EV 15
+#define FSE_AZ_RX_DSC_ERROR_EV 14
+#define FSE_AZ_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AZ_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+
+/* FS_EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_AZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+#define FSF_AZ_EV_DATA_DW0_LBN 0
+#define FSF_AZ_EV_DATA_DW0_WIDTH 32
+#define FSF_AZ_EV_DATA_DW1_LBN 32
+#define FSF_AZ_EV_DATA_DW1_WIDTH 28
+
+
+/* FS_GLOBAL_EV */
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_LBN 11
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+
+/* FS_RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+
+/* FS_TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+
+/* FS_USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+
+/* FS_NET_IVEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+
+#define FR_AZ_TX_PACE_TBL_OFST FR_BZ_TX_PACE_TBL_OFST
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+
+#endif /* _SYS_EFX_REGS_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_ef10.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_ef10.h
new file mode 100644
index 00000000..11a91848
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_ef10.h
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_EF10_REGS_H
+#define _SYS_EFX_EF10_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**************************************************************************
+ * NOTE: the line below marks the start of the autogenerated section
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * BIU_HW_REV_ID_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
+
+
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+
+/*
+ * BIU_MC_SFT_STATUS_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
+
+
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+
+/*
+ * BIU_INT_ISR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0
+
+
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+
+/*
+ * MC_DB_LWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+
+/*
+ * MC_DB_HWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+
+/*
+ * EVQ_RPTR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_RPTR_REG_STEP 8192
+#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
+#define ER_DZ_EVQ_RPTR_REG_RESET 0x0
+
+
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_TMR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_TMR_REG_STEP 8192
+#define ER_DZ_EVQ_TMR_REG_ROWS 2048
+#define ER_DZ_EVQ_TMR_REG_RESET 0x0
+
+
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * RX_DESC_UPD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_RX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/*
+ * TX_DESC_UPD_REG(96bit):
+ *
+ */
+
+#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_TX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+
+/* ES_DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_DW0_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_LBN 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+
+/* ES_EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_DW0_LBN 0
+#define ESF_DZ_EV_DATA_DW0_WIDTH 32
+#define ESF_DZ_EV_DATA_DW1_LBN 32
+#define ESF_DZ_EV_DATA_DW1_WIDTH 28
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+
+/* ES_MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_DW0_LBN 0
+#define ESF_DZ_MC_SOFT_DW0_WIDTH 32
+#define ESF_DZ_MC_SOFT_DW1_LBN 32
+#define ESF_DZ_MC_SOFT_DW1_WIDTH 26
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+
+/* ES_RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DD_RX_EV_RSVD2_LBN 54
+#define ESF_DD_RX_EV_RSVD2_WIDTH 4
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_EV_RSVD2_LBN 54
+#define ESF_EZ_RX_EV_RSVD2_WIDTH 2
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DD_RX_EV_SOFT1_LBN 32
+#define ESF_DD_RX_EV_SOFT1_WIDTH 3
+#define ESF_EZ_RX_EV_SOFT1_LBN 34
+#define ESF_EZ_RX_EV_SOFT1_WIDTH 1
+#define ESF_EZ_RX_ENCAP_HDR_LBN 32
+#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2
+#define ESE_EZ_ENCAP_HDR_GRE 2
+#define ESE_EZ_ENCAP_HDR_VXLAN 1
+#define ESE_EZ_ENCAP_HDR_NONE 0
+#define ESF_DD_RX_EV_RSVD1_LBN 30
+#define ESF_DD_RX_EV_RSVD1_WIDTH 2
+#define ESF_EZ_RX_EV_RSVD1_LBN 31
+#define ESF_EZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_EZ_RX_ABORT_LBN 30
+#define ESF_EZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+
+/* ES_RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+
+/* ES_TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DD_TX_EV_RSVD_LBN 48
+#define ESF_DD_TX_EV_RSVD_WIDTH 10
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_TX_EV_RSVD_LBN 48
+#define ESF_EZ_TX_EV_RSVD_WIDTH 8
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DD_TX_SOFT1_LBN 24
+#define ESF_DD_TX_SOFT1_WIDTH 8
+#define ESF_EZ_TX_CAN_MERGE_LBN 31
+#define ESF_EZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_EZ_TX_SOFT1_LBN 24
+#define ESF_EZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+
+/* ES_TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+
+/* ES_TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2A_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2B_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+
+
+/* ES_TX_VLAN_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_VLAN_OP_LBN 32
+#define ESF_DZ_TX_VLAN_OP_WIDTH 2
+#define ESF_DZ_TX_VLAN_TAG2_LBN 16
+#define ESF_DZ_TX_VLAN_TAG2_WIDTH 16
+#define ESF_DZ_TX_VLAN_TAG1_LBN 0
+#define ESF_DZ_TX_VLAN_TAG1_WIDTH 16
+
+
+/*************************************************************************
+ * NOTE: the comment line above marks the end of the autogenerated section
+ */
+
+/*
+ * The workaround for bug 35388 requires multiplexing writes through
+ * the ERF_DZ_TX_DESC_WPTR address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4)
+#define ER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
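+
+/*
+ * Editorial note (not in the upstream header): a minimal sketch of how a
+ * driver might form the two dword writes that replace a single EVQ_RPTR
+ * update when the bug 35388 workaround is in effect.  The helper name, the
+ * write order (high chunk first, then low) and the use of <stdint.h> types
+ * are illustrative assumptions, not a definitive implementation.
+ */
+#if 0 /* illustration only; not compiled */
+#include <stdint.h>
+
+static inline void
+evq_rptr_indirect_words(unsigned int rptr, uint32_t *hi, uint32_t *lo)
+{
+	/* bits 8..11 carry the flags value, bits 0..7 carry one byte of RPTR */
+	*hi = (EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH << ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) |
+	      ((rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH) &
+	       ((1u << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+	*lo = (EFE_DD_EVQ_IND_RPTR_FLAGS_LOW << ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) |
+	      (rptr & ((1u << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+	/* each word would then be written to
+	 * ER_DD_EVQ_INDIRECT_OFST + queue_index * ER_DD_EVQ_INDIRECT_STEP */
+}
+#endif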
+
+/* Packed stream magic doorbell command */
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_LBN 11
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_WIDTH 1
+
+#define ERF_DZ_RX_DESC_MAGIC_CMD_LBN 8
+#define ERF_DZ_RX_DESC_MAGIC_CMD_WIDTH 3
+#define ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS 0
+
+#define ERF_DZ_RX_DESC_MAGIC_DATA_LBN 0
+#define ERF_DZ_RX_DESC_MAGIC_DATA_WIDTH 8
+
+/* Packed stream RX packet prefix */
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_LBN 0
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_WIDTH 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_LBN 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH 16
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
+
+/*
+ * An extra flag for the packed stream mode,
+ * signalling the start of a new buffer
+ */
+#define ESF_DZ_RX_EV_ROTATE_LBN 53
+#define ESF_DZ_RX_EV_ROTATE_WIDTH 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_EF10_REGS_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h
new file mode 100644
index 00000000..66896fbb
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -0,0 +1,15690 @@
+/*-
+ * Copyright 2008-2013 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_H
+#define _SIENA_MC_DRIVER_PCOL_H
+
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#ifdef WITH_MCDI_V2
+#define MCDI_PCOL_VERSION 2
+#else
+#define MCDI_PCOL_VERSION 1
+#endif
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which case any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
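+
+/*
+ * Editorial note (not in the upstream header): a minimal sketch of how the
+ * MCDI_HEADER_* LBN/WIDTH fields above could be packed into a V1 request
+ * header dword.  The helper name and <stdint.h> usage are illustrative
+ * assumptions; real drivers build this with their own bitfield macros.
+ */
+#if 0 /* illustration only; not compiled */
+#include <stdint.h>
+
+static inline uint32_t
+mcdi_v1_request_header(unsigned int code, unsigned int datalen,
+		       unsigned int seq, unsigned int xflags)
+{
+	uint32_t hdr = 0;
+
+	hdr |= (code & ((1u << MCDI_HEADER_CODE_WIDTH) - 1))
+	    << MCDI_HEADER_CODE_LBN;
+	hdr |= 1u << MCDI_HEADER_RESYNC_LBN;	/* resync is always set */
+	hdr |= (datalen & ((1u << MCDI_HEADER_DATALEN_WIDTH) - 1))
+	    << MCDI_HEADER_DATALEN_LBN;
+	hdr |= (seq & ((1u << MCDI_HEADER_SEQ_WIDTH) - 1))
+	    << MCDI_HEADER_SEQ_LBN;
+	hdr |= (xflags & ((1u << MCDI_HEADER_XFLAGS_WIDTH) - 1))
+	    << MCDI_HEADER_XFLAGS_LBN;
+	return hdr;
+}
+#endif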
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#ifdef WITH_MCDI_V2
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+#else
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1
+#endif
+
+
+/* The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header.  An event can be distinguished from an MCDI response by
+ * examining the first byte, which is 0xc0.  This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
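+
+/*
+ * Editorial note (not in the upstream header): a minimal sketch of how the
+ * CMDDONE layout described above could be decoded from a raw 64-bit event.
+ * The generic extractor and the use of the MCDI_EVENT_* field definitions
+ * that appear later in this file are illustrative assumptions only.
+ */
+#if 0 /* illustration only; not compiled */
+#include <stdint.h>
+
+static inline unsigned int
+mcdi_ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
+{
+	return (unsigned int)((ev >> lbn) & ((1ull << width) - 1));
+}
+
+static void
+mcdi_decode_cmddone(uint64_t ev)
+{
+	if (mcdi_ev_field(ev, MCDI_EVENT_CODE_LBN,
+	    MCDI_EVENT_CODE_WIDTH) != MCDI_EVENT_CODE_CMDDONE)
+		return;
+	/* Seq, Datalen and Errno mirror the standard MCDI header fields */
+	unsigned int seq = mcdi_ev_field(ev, MCDI_EVENT_CMDDONE_SEQ_LBN,
+	    MCDI_EVENT_CMDDONE_SEQ_WIDTH);
+	unsigned int datalen = mcdi_ev_field(ev, MCDI_EVENT_CMDDONE_DATALEN_LBN,
+	    MCDI_EVENT_CMDDONE_DATALEN_WIDTH);
+	unsigned int errno_v = mcdi_ev_field(ev, MCDI_EVENT_CMDDONE_ERRNO_LBN,
+	    MCDI_EVENT_CMDDONE_ERRNO_WIDTH);
+	(void)seq; (void)datalen; (void)errno_v;
+}
+#endif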
+
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+ only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
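+
+/*
+ * Editorial note (not in the upstream header): the initialiser above is a
+ * four-dword capability mask, one bit per command number.  A minimal sketch
+ * of how it might be tested follows; the helper name and the dword/bit
+ * indexing are illustrative assumptions.
+ */
+#if 0 /* illustration only; not compiled */
+#include <stdint.h>
+
+static inline int
+mcdi_v0_cmd_supported(unsigned int cmd)
+{
+	static const uint32_t mask[4] = MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS;
+
+	return (cmd < 128) && ((mask[cmd / 32] >> (cmd % 32)) & 1);
+}
+#endif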
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
+
+#ifdef WITH_MCDI_V2
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+#endif
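+
+/*
+ * Editorial note (not in the upstream header): a minimal sketch of how a V2
+ * error response could be interpreted using MC_CMD_ERR_CODE_OFST and
+ * MC_CMD_ERR_ARG_OFST.  The payload pointer and length checks are
+ * illustrative assumptions; endianness handling is omitted.
+ */
+#if 0 /* illustration only; not compiled */
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+static void
+mcdi_v2_parse_error(const uint8_t *payload, size_t len,
+		    uint32_t *err, uint32_t *bad_arg)
+{
+	*err = 0;
+	*bad_arg = 0;
+	if (len >= MC_CMD_ERR_CODE_OFST + 4)
+		memcpy(err, payload + MC_CMD_ERR_CODE_OFST, 4);
+	/* the argument index is optional; only present when the error
+	 * payload is long enough to carry it */
+	if (len >= MC_CMD_ERR_ARG_OFST + 4)
+		memcpy(bad_arg, payload + MC_CMD_ERR_ARG_OFST, 4);
+}
+#endif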
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
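+
+/* Minimal decoding sketch (illustrative only, hypothetical names): a
+ * CODE_PTP_TIME event carries the full major value in DATA but only bits
+ * 19-26 (or 21-26 when sync-status reporting is enabled) of the minor value,
+ * per the field comments above. The helpers below show how those bits would
+ * be extracted from the raw 64-bit event and shifted back into position;
+ * recovering the low-order minor bits is outside the scope of this event.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+
+static inline uint32_t mcdi_ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
+{
+	/* Extract a bitfield described by its LBN/WIDTH pair */
+	return (uint32_t)((ev >> lbn) & ((1ull << width) - 1));
+}
+
+static inline void mcdi_ptp_time_decode(uint64_t ev, uint32_t *majorp,
+					uint32_t *minor_hi_bits)
+{
+	*majorp = mcdi_ev_field(ev, MCDI_EVENT_PTP_TIME_MAJOR_LBN,
+				MCDI_EVENT_PTP_TIME_MAJOR_WIDTH);
+	/* Bits 19-26 of the minor value, shifted back to their position */
+	*minor_hi_bits = mcdi_ev_field(ev, MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN,
+				       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH) << 19;
+}
+#endif
+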
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
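+
+/* Minimal decoding sketch (illustrative only, hypothetical names): a
+ * CODE_PROXY_RESPONSE event carries the request handle in DATA and the result
+ * in RC; RC == 0 means the request was completed or authorized and should be
+ * resent, while a non-zero RC (typically EPERM) gives the denial reason, as
+ * described above.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+
+static inline int mcdi_proxy_response_denied(uint64_t ev, uint32_t *handlep)
+{
+	uint32_t rc;
+
+	*handlep = (uint32_t)((ev >> MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN) &
+	    ((1ull << MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH) - 1));
+	rc = (uint32_t)((ev >> MCDI_EVENT_PROXY_RESPONSE_RC_LBN) &
+	    ((1ull << MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH) - 1));
+	return rc != 0;	/* non-zero: denied, rc is the errno-style reason */
+}
+#endif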
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid on a normal FCDI event such
+ * that bits 32-63, containing the event code, level, source etc., remain the
+ * same. In this case the data field of the header is defined to be the number
+ * of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
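+
+/* Layout sketch (illustrative only): the extended PPS event is COUNT
+ * timestamp records of 8 bytes each after an 8-byte header, so its total
+ * length is FCDI_EXTENDED_EVENT_PPS_LEN(count) = 8 + 8 * count. Assuming the
+ * payload is presented as little-endian 32-bit words, the i-th record's
+ * seconds and nanoseconds fields could be pulled out as below (hypothetical
+ * helper).
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+#include <string.h>
+
+static inline void fcdi_pps_timestamp(const uint8_t *event, unsigned int i,
+				      uint32_t *secondsp, uint32_t *nanosp)
+{
+	unsigned int base = FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST +
+	    i * FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN;
+
+	memcpy(secondsp, event + base, 4);	/* seconds at record offset 0 */
+	memcpy(nanosp, event + base + 4, 4);	/* nanoseconds at offset 4 */
+}
+#endif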
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
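+
+/* Size sketch (illustrative only): the response carries NUMWORDS 32-bit
+ * words, so a request for n words expects MC_CMD_READ32_OUT_LEN(n) = 4 * n
+ * bytes back, bounded by BUFFER_MINNUM/MAXNUM (1..63 words). A caller might
+ * validate the response length as below (hypothetical helper).
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stddef.h>
+
+static inline int mc_cmd_read32_resp_len_ok(size_t resp_len, unsigned int numwords)
+{
+	return numwords >= MC_CMD_READ32_OUT_BUFFER_MINNUM &&
+	    numwords <= MC_CMD_READ32_OUT_BUFFER_MAXNUM &&
+	    resp_len == (size_t)MC_CMD_READ32_OUT_LEN(numwords);
+}
+#endif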
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
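+
+/* Request layout sketch (illustrative only): the 16-byte COPYCODE request
+ * packs SRC_ADDR, DEST_ADDR, NUMWORDS and JUMP as consecutive 32-bit words;
+ * using MC_CMD_COPYCODE_JUMP_NONE asks the MC to return to the caller instead
+ * of jumping. Hypothetical helper assuming a little-endian payload buffer.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+#include <string.h>
+
+static inline void mc_cmd_copycode_request(uint8_t buf[MC_CMD_COPYCODE_IN_LEN],
+					   uint32_t src, uint32_t dest,
+					   uint32_t numwords)
+{
+	uint32_t jump = MC_CMD_COPYCODE_JUMP_NONE;	/* return to caller */
+
+	memcpy(buf + MC_CMD_COPYCODE_IN_SRC_ADDR_OFST, &src, 4);
+	memcpy(buf + MC_CMD_COPYCODE_IN_DEST_ADDR_OFST, &dest, 4);
+	memcpy(buf + MC_CMD_COPYCODE_IN_NUMWORDS_OFST, &numwords, 4);
+	memcpy(buf + MC_CMD_COPYCODE_IN_JUMP_OFST, &jump, 4);
+}
+#endif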
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
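+
+/* Response parsing sketch (illustrative only): only GLOBAL_FLAGS is
+ * guaranteed to be present, so a caller should check it against NO_FAILS
+ * before reading the saved PC or the 31 GP registers, and treat REG_NO_DATA
+ * register values as lost. Hypothetical helper assuming a little-endian
+ * response buffer of at least MC_CMD_GET_ASSERTS_OUT_LEN bytes when a failure
+ * is reported.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+#include <string.h>
+
+static inline int mc_cmd_get_asserts_failed_pc(const uint8_t *resp,
+					       uint32_t *pcp)
+{
+	uint32_t flags;
+
+	memcpy(&flags, resp + MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST, 4);
+	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+		return 0;	/* no assertion recorded; other fields absent */
+	memcpy(pcp, resp + MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST, 4);
+	return 1;
+}
+#endif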
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
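+
+/* Request sketch (illustrative only): an 8-byte LOG_CTRL request selects UART
+ * or event-queue logging in LOG_DEST and must leave the legacy second word at
+ * zero, per the comments above. Hypothetical helper assuming a little-endian
+ * payload buffer.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+#include <string.h>
+
+static inline void mc_cmd_log_ctrl_to_evq(uint8_t buf[MC_CMD_LOG_CTRL_IN_LEN])
+{
+	uint32_t dest = MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
+	uint32_t legacy = 0;	/* legacy argument, must be zero */
+
+	memcpy(buf + MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST, &dest, 4);
+	memcpy(buf + MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST, &legacy, 4);
+}
+#endif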
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
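+
+/* Response parsing sketch (illustrative only): the 64-bit VERSION field is
+ * split into LO/HI 32-bit words at offsets 24/28, and EXTRA is a further 16
+ * bytes of info at offset 32. Hypothetical helper assuming a little-endian
+ * response buffer of MC_CMD_GET_VERSION_EXT_OUT_LEN bytes.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+#include <string.h>
+
+static inline uint64_t mc_cmd_get_version_ext_version(const uint8_t *resp,
+						      uint8_t extra[16])
+{
+	uint32_t lo, hi;
+
+	memcpy(&lo, resp + MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST, 4);
+	memcpy(&hi, resp + MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST, 4);
+	memcpy(extra, resp + MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST,
+	       MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN);
+	return ((uint64_t)hi << 32) | lo;
+}
+#endif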
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC. */
+#define MC_CMD_FC_OP_NULL 0x1
+/* enum: Unused opcode */
+#define MC_CMD_FC_OP_UNUSED 0x2
+/* enum: MAC driver commands */
+#define MC_CMD_FC_OP_MAC 0x3
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_READ32 0x4
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_WRITE32 0x5
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_READ 0x6
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_WRITE 0x7
+/* enum: FC firmware Version */
+#define MC_CMD_FC_OP_GET_VERSION 0x8
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_RX_READ 0x9
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
+/* enum: SFP parameters */
+#define MC_CMD_FC_OP_SFP 0xb
+/* enum: DDR3 test */
+#define MC_CMD_FC_OP_DDR_TEST 0xc
+/* enum: Get Crash context from FC */
+#define MC_CMD_FC_OP_GET_ASSERT 0xd
+/* enum: Get FPGA Build registers */
+#define MC_CMD_FC_OP_FPGA_BUILD 0xe
+/* enum: Read map support commands */
+#define MC_CMD_FC_OP_READ_MAP 0xf
+/* enum: FC Capabilities */
+#define MC_CMD_FC_OP_CAPABILITIES 0x10
+/* enum: FC Global flags */
+#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
+/* enum: FC IO using relative addressing modes */
+#define MC_CMD_FC_OP_IO_REL 0x12
+/* enum: FPGA link information */
+#define MC_CMD_FC_OP_UHLINK 0x13
+/* enum: Configure loopbacks and link on FPGA ports */
+#define MC_CMD_FC_OP_SET_LINK 0x14
+/* enum: Licensing operations relating to AOE */
+#define MC_CMD_FC_OP_LICENSE 0x15
+/* enum: Startup information to the FC */
+#define MC_CMD_FC_OP_STARTUP 0x16
+/* enum: Configure a DMA read */
+#define MC_CMD_FC_OP_DMA 0x17
+/* enum: Configure a timed read */
+#define MC_CMD_FC_OP_TIMED_READ 0x18
+/* enum: Control UART logging */
+#define MC_CMD_FC_OP_LOG 0x19
+/* enum: Get the value of a given clock_id */
+#define MC_CMD_FC_OP_CLOCK 0x1a
+/* enum: DDR3/QDR3 parameters */
+#define MC_CMD_FC_OP_DDR 0x1b
+/* enum: PTP and timestamp control */
+#define MC_CMD_FC_OP_TIMESTAMP 0x1c
+/* enum: Commands for SPI Flash interface */
+#define MC_CMD_FC_OP_SPI 0x1d
+/* enum: Commands for diagnostic components */
+#define MC_CMD_FC_OP_DIAG 0x1e
+/* enum: External AOE port. */
+#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
+/* enum: Internal AOE port. */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
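+
+/* Header sketch (illustrative only): the 32-bit MAC header word at offset 4
+ * packs OP, PORT_TYPE, PORT_IDX and CMD_FORMAT into consecutive byte-wide
+ * fields; with CMD_FORMAT_PORT_OVERRIDE the port is taken from
+ * PORT_TYPE/PORT_IDX rather than derived from the PCI function. Hypothetical
+ * helper, e.g. mc_cmd_fc_mac_header(MC_CMD_FC_OP_MAC_OP_GET_STATS,
+ * MC_CMD_FC_PORT_EXT, 0) for stats on external port 0.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+
+static inline uint32_t mc_cmd_fc_mac_header(uint32_t op, uint32_t port_type,
+					    uint32_t port_idx)
+{
+	return (op << MC_CMD_FC_IN_MAC_OP_LBN) |
+	    (port_type << MC_CMD_FC_IN_MAC_PORT_TYPE_LBN) |
+	    (port_idx << MC_CMD_FC_IN_MAC_PORT_IDX_LBN) |
+	    ((uint32_t)MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE <<
+	     MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN);
+}
+#endif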
+
+/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
+#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MTU size */
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
+/* Drain Tx FIFO */
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
+
+/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
+#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC Statistics index */
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
+/* Number of statistics to read */
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
+#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
+#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
+
+/* MC_CMD_FC_IN_READ32 msgrequest */
+#define MC_CMD_FC_IN_READ32_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
+
+/* MC_CMD_FC_IN_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16
+#define MC_CMD_FC_IN_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
+#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_TRC_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
+
+/* MC_CMD_FC_IN_GET_VERSION msgrequest */
+#define MC_CMD_FC_IN_GET_VERSION_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
+
+/* MC_CMD_FC_IN_SFP msgrequest */
+#define MC_CMD_FC_IN_SFP_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Link speed in Mbit/s: 100, 1000, 10000 or 40000 */
+#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
+/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
+/* Not relevant for cards with QSFP modules. For older cards, true if module is
+ * a dual speed SFP+ module.
+ */
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
+/* The type of the SFP+ Module. For later cards with QSFP modules, this field
+ * is unused and the type is communicated by other means.
+ */
+#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
+/* Capabilities corresponding to 1 bits. */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get the build registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: Get the services registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+
+/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
+#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
+
+/* MC_CMD_FC_IN_IO_REL msgrequest */
+#define MC_CMD_FC_IN_IO_REL_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
+#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
+#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
+/* enum: Get the base address that the FC applies to relative commands */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
+/* enum: Read data */
+#define MC_CMD_FC_IN_IO_REL_READ32 0x2
+/* enum: Write data */
+#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
+/* enum: Application address space */
+#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
+/* enum: Flash address space */
+#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
+
+/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
+
+/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
+
+/* MC_CMD_FC_IN_UHLINK msgrequest */
+#define MC_CMD_FC_IN_UHLINK_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Dump Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Read back Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
+
+/* MC_CMD_FC_IN_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_SET_LINK_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
+#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
+
+/* MC_CMD_FC_IN_LICENSE msgrequest */
+#define MC_CMD_FC_IN_LICENSE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
+#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
+#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
+
+/* MC_CMD_FC_IN_STARTUP msgrequest */
+#define MC_CMD_FC_IN_STARTUP_LEN 40
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
+#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
+/* Length of identifier */
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
+/* Identifier for AOE FPGA */
+#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
+#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
+#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
+
+/* MC_CMD_FC_IN_DMA msgrequest */
+#define MC_CMD_FC_IN_DMA_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DMA_OP_OFST 4
+#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
+#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DMA_STOP msgrequest */
+#define MC_CMD_FC_IN_DMA_STOP_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_DMA_READ msgrequest */
+#define MC_CMD_FC_IN_DMA_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
+
+/* MC_CMD_FC_IN_TIMED_READ msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
+#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
+
+/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
+/* Length of host transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
+/* Offset back from aoe_address to apply operation to */
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
+/* Data to apply at offset */
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
+/* Period at which reads are performed (100ms units) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
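+
+/* Flags sketch (illustrative only): the FLAGS word packs the INDIRECT, DOUBLE
+ * and EVENT single-bit options plus the two-bit PREREAD mode, while PERIOD
+ * counts in 100 ms units (e.g. 10 for a 1-second cycle). Hypothetical helper
+ * composing the flags word (DOUBLE left clear), e.g.
+ * mc_cmd_fc_timed_read_flags(0, 1, MC_CMD_FC_IN_TIMED_READ_SET_READ).
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+
+static inline uint32_t mc_cmd_fc_timed_read_flags(int indirect, int gen_event,
+						  uint32_t preread_mode)
+{
+	return ((uint32_t)(indirect != 0) << MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN) |
+	    ((uint32_t)(gen_event != 0) << MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN) |
+	    (preread_mode << MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN);
+}
+#endif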
+
+/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_LOG msgrequest */
+#define MC_CMD_FC_IN_LOG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LOG_OP_OFST 4
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
+#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
+
+/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Partition offset into flash */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
+/* Partition length */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
+/* Partition erase size */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
+
+/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Enable/disable printing to JTAG UART */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
+
+/* MC_CMD_FC_IN_CLOCK msgrequest */
+#define MC_CMD_FC_IN_CLOCK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
+#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
+/* Clock ID on which to perform the operation */
+#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
+#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
+
+/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* Retrieve the clock value of the specified clock */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+
+/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
+/* Set the clock value of the specified clock */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
+
+/* MC_CMD_FC_IN_DDR msgrequest */
+#define MC_CMD_FC_IN_DDR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_OP_OFST 4
+#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_OFST 8
+#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
+#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Flags */
+#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
+#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
+/* 128-byte page of serial presence detect data read from module's EEPROM */
+#define MC_CMD_FC_IN_DDR_SPD_OFST 16
+#define MC_CMD_FC_IN_DDR_SPD_LEN 1
+#define MC_CMD_FC_IN_DDR_SPD_NUM 128
+/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
+
+/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Size of DDR */
+#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
+
+/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
+#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+
+/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC timestamp operation code */
+#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
+/* enum: Read transmit timestamp(s) */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
+/* enum: Read snapshot timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
+/* enum: Clear all transmit timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
+/* Controls filtering of the returned timestamp: either the most recent, or
+ * the one matching the clock identity, port and sequence number given below
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
+/* enum: Return most recent timestamp. No filtering */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
+/* enum: Match timestamp against the PTP clock ID, port number and sequence
+ * number specified
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
+/* Clock identity of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
+/* Port number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
+/* Sequence number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
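+
+/* Request sketch (illustrative only): with FILTER set to MATCH, the request
+ * also carries the PTP clock identity (split LO/HI), port number and sequence
+ * number to match; with LATEST those fields are ignored and the most recent
+ * transmit timestamp is returned. Hypothetical helper assuming a
+ * little-endian payload buffer of MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN
+ * bytes, with the FC opcode written into the OP_HDR word at offset 0.
+ */
+#ifdef EFX_MCDI_PCOL_EXAMPLES /* illustrative only, not built */
+#include <stdint.h>
+#include <string.h>
+
+static inline void mc_cmd_fc_ts_read_tx_match(uint8_t *buf, uint64_t clock_id,
+					      uint32_t port, uint32_t seq)
+{
+	uint32_t cmd = MC_CMD_FC_OP_TIMESTAMP;
+	uint32_t op = MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT;
+	uint32_t filter = MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH;
+	uint32_t lo = (uint32_t)clock_id, hi = (uint32_t)(clock_id >> 32);
+
+	memcpy(buf + MC_CMD_FC_IN_CMD_OFST, &cmd, 4);
+	memcpy(buf + MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST, &op, 4);
+	memcpy(buf + MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST, &filter, 4);
+	memcpy(buf + MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST, &lo, 4);
+	memcpy(buf + MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST, &hi, 4);
+	memcpy(buf + MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST, &port, 4);
+	memcpy(buf + MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST, &seq, 4);
+}
+#endif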
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
+
+/* MC_CMD_FC_IN_SPI msgrequest */
+#define MC_CMD_FC_IN_SPI_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Basic commands for SPI Flash. */
+#define MC_CMD_FC_IN_SPI_OP_OFST 4
+/* enum: SPI Flash read */
+#define MC_CMD_FC_IN_SPI_READ 0x0
+/* enum: SPI Flash write */
+#define MC_CMD_FC_IN_SPI_WRITE 0x1
+/* enum: SPI Flash erase */
+#define MC_CMD_FC_IN_SPI_ERASE 0x2
+
+/* MC_CMD_FC_IN_SPI_READ msgrequest */
+#define MC_CMD_FC_IN_SPI_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
+#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
+#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
+#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
+#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_DIAG msgrequest */
+#define MC_CMD_FC_IN_DIAG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Operation code indicating component type */
+#define MC_CMD_FC_IN_DIAG_OP_OFST 4
+/* enum: Power noise generator. */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
+/* enum: DDR soak test component. */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
+/* enum: Diagnostics datapath control component. */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
+/* enum: Read the configuration (the 32-bit values in each of the clock enable
+ * count and toggle count registers)
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
+/* enum: Write a new configuration to the clock enable count and toggle count
+ * registers
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
+/* The 32-bit value to be written to the toggle count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
+/* The 32-bit value to be written to the clock enable count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
+/* enum: Starts DDR soak test on selected banks */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
+/* enum: Read status of DDR soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
+/* enum: Stop test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
+/* enum: Set or clear bit that triggers fake errors. These cause subsequent
+ * tests to fail until the bit is cleared.
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
+/* Pattern to use in the soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
+/* Either repeated automatic tests until a STOP command is issued, or a
+ * single test
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
+/* DDR bank to read status from */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
+#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
+#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
+#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
+#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
+#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
+/* Mask of DDR banks to set/clear error flag on */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
+
+/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
+#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
+
+/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
+#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
+#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_RX_NSTATS 0x19
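
/*
 * Editorial illustration (not part of the generated header): reading one
 * 64-bit counter out of the GET_RX_STATS response.  The LEN expression
 * above reduces to exactly 8 bytes per statistic (64 * NSTATS bits
 * converted to bytes), i.e. 8 * 0x19 = 200 bytes in total, and each
 * counter is stored as a LO/HI pair of little-endian dwords.  The function
 * name is illustrative; the sketch assumes the definitions above are in
 * scope and a little-endian host.
 */
#include <stdint.h>
#include <string.h>

static uint64_t
fc_rx_stat_read(const uint8_t *resp, unsigned int stat_idx)
{
	const uint8_t *entry = resp +
	    MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST +
	    stat_idx * MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN;
	uint32_t lo, hi;

	memcpy(&lo, entry + 0, 4);	/* ..._STATISTICS_LO dword */
	memcpy(&hi, entry + 4, 4);	/* ..._STATISTICS_HI dword */
	return ((uint64_t)hi << 32) | lo;
}

/* e.g. fc_rx_stat_read(resp, MC_CMD_FC_MAC_RX_OCTETS_OK) */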
+
+/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
+#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Test completed */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
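
/*
 * Editorial illustration (not part of the generated header): the _LBN
 * (least-significant bit number) and _WIDTH pairs above describe bitfields
 * within a 32-bit dword, extracted as (dword >> LBN) & ((1 << WIDTH) - 1).
 * The FC_FIELD helper macro and the function name are additions for this
 * sketch only; it assumes the definitions above are in scope and a
 * little-endian host.
 */
#include <stdint.h>
#include <string.h>

#define FC_FIELD(dword, name) \
	(((dword) >> name ## _LBN) & ((1u << name ## _WIDTH) - 1u))

static void
fc_ddr_test_poll_decode(const uint8_t *resp, uint32_t *codep, uint32_t *t0p)
{
	uint32_t status;

	memcpy(&status, resp + MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST, 4);
	/* One of MC_CMD_FC_OP_DDR_TEST_{NONE,INPROGRESS,SUCCESS,TIMER_EXPIRED} */
	*codep = FC_FIELD(status, MC_CMD_FC_OUT_DDR_TEST_POLL_CODE);
	/* Set when DDR bank T0 is present */
	*t0p = FC_FIELD(status, MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0);
}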
+
+/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
+
+/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
+#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
+/* enum: No crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
+/* enum: New crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
+/* enum: Crash data has been sent */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
+/* enum: Crash due to exception. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
+/* enum: Crash due to assertion. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
+/* Failing PC value */
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
+/* Instruction at which exception occurred */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
+/* Bad address that triggered the address-based exception */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+
+/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
+#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
+#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
+#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
+#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
+#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
+/* Qsys system ID */
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
+
+/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
+/* Number of maps */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
+
+/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
+/* Index of the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
+/* Options for the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
+/* Address of start of map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
+/* Length of address map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
+/* Component information field */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
+/* License expiry data for map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
+/* Name of the component */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
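
/*
 * Editorial illustration (not part of the generated header): the OPTIONS
 * dword above packs a 2-bit alignment code (ALIGN_MASK) together with
 * OR-able path, permission and licence flags.  The function name is
 * illustrative; the sketch assumes the definitions above are in scope and
 * a little-endian host.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
fc_map_is_writable(const uint8_t *resp, uint32_t *align_codep)
{
	uint32_t options;

	memcpy(&options, resp + MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST, 4);
	/* 0..3, i.e. one of the ALIGN_8/16/32/64 codes */
	*align_codep = options & MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK;
	return (options & MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE) != 0;
}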
+
+/* MC_CMD_FC_OUT_READ_MAP msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_LEN 0
+
+/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
+#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
+/* Number of internal ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
+/* Number of external ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
+
+/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
+
+/* MC_CMD_FC_OUT_IO_REL msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_LEN 0
+
+/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
+
+/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
+/* Transceiver Transmit settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFP parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed in Mb/s: 100, 1000 or 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+/* True if a dual speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/* Has the eye plot dump completed and is the returned data valid? */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
+/* Rx Eye binary plot */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
+
+/* MC_CMD_FC_OUT_UHLINK msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LEN 0
+
+/* MC_CMD_FC_OUT_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_LICENSE msgresponse */
+#define MC_CMD_FC_OUT_LICENSE_LEN 12
+/* Count of valid keys */
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
+/* Count of invalid keys */
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
+/* Count of blacklisted keys */
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
+
+/* MC_CMD_FC_OUT_STARTUP msgresponse */
+#define MC_CMD_FC_OUT_STARTUP_LEN 4
+/* Capabilities of the FPGA/FC */
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
+
+/* MC_CMD_FC_OUT_DMA_READ msgresponse */
+#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
+#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
+#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
+/* The data read */
+#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
+
+/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
+/* Timer handle */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
+
+/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
+/* Host-supplied handle (unique) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
+/* Host address into which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
+/* Length of host transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
+/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
+/* When active, start read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
+/* When active, end read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
+
+/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
+#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
+
+/* MC_CMD_FC_OUT_LOG msgresponse */
+#define MC_CMD_FC_OUT_LOG_LEN 0
+
+/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
+
+/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
+
+/* MC_CMD_FC_OUT_SPI_READ msgresponse */
+#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
+#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
+#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
+#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
+#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
+/* The 32-bit value read from the toggle count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
+/* The 32-bit value read from the clock enable count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
+/* DDR soak test status word; bits [4:0] are relevant. */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
+/* DDR soak test error count */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
+
+
+/***********************************/
+/* MC_CMD_AOE
+ * AOE operations on MC
+ */
+#define MC_CMD_AOE 0xa
+
+/* MC_CMD_AOE_IN msgrequest */
+#define MC_CMD_AOE_IN_LEN 4
+#define MC_CMD_AOE_IN_OP_HDR_OFST 0
+#define MC_CMD_AOE_IN_OP_LBN 0
+#define MC_CMD_AOE_IN_OP_WIDTH 8
+/* enum: FPGA and CPLD information */
+#define MC_CMD_AOE_OP_INFO 0x1
+/* enum: Currents and voltages read from MCP3424s; DEBUG */
+#define MC_CMD_AOE_OP_CURRENTS 0x2
+/* enum: Temperatures at locations around the PCB; DEBUG */
+#define MC_CMD_AOE_OP_TEMPERATURES 0x3
+/* enum: Set CPLD to idle */
+#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
+/* enum: Read from CPLD register */
+#define MC_CMD_AOE_OP_CPLD_READ 0x5
+/* enum: Write to CPLD register */
+#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
+/* enum: Execute CPLD instruction */
+#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
+/* enum: Reprogram the CPLD on the AOE device */
+#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
+/* enum: AOE power control */
+#define MC_CMD_AOE_OP_POWER 0x9
+/* enum: AOE image loading */
+#define MC_CMD_AOE_OP_LOAD 0xa
+/* enum: Fan monitoring */
+#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
+/* enum: Fan failures since last reset */
+#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
+/* enum: Get generic AOE MAC statistics */
+#define MC_CMD_AOE_OP_MAC_STATS 0xd
+/* enum: Retrieve PHY specific information */
+#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
+/* enum: Write a number of JTAG primitive commands, return will give data */
+#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
+/* enum: Control access to the FPGA via the Siena JTAG Chain */
+#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
+/* enum: Set the MTU offset between Siena and AOE MACs */
+#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
+/* enum: How link state is handled */
+#define MC_CMD_AOE_OP_LINK_STATE 0x12
+/* enum: How Siena MAC statistics are reported (deprecated - use
+ * MC_CMD_AOE_OP_ASIC_STATS)
+ */
+#define MC_CMD_AOE_OP_SIENA_STATS 0x13
+/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
+ * command MC_CMD_AOE_OP_SIENA_STATS
+ */
+#define MC_CMD_AOE_OP_ASIC_STATS 0x13
+/* enum: DDR memory information */
+#define MC_CMD_AOE_OP_DDR 0x14
+/* enum: FC control */
+#define MC_CMD_AOE_OP_FC 0x15
+/* enum: DDR ECC status reads */
+#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
+/* enum: Commands for MC-SPI Master emulation */
+#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
+/* enum: Commands for FC boot control */
+#define MC_CMD_AOE_OP_FC_BOOT 0x18
+
+/* MC_CMD_AOE_OUT msgresponse */
+#define MC_CMD_AOE_OUT_LEN 0
+
+/* MC_CMD_AOE_IN_INFO msgrequest */
+#define MC_CMD_AOE_IN_INFO_LEN 4
+#define MC_CMD_AOE_IN_CMD_OFST 0
+
+/* MC_CMD_AOE_IN_CURRENTS msgrequest */
+#define MC_CMD_AOE_IN_CURRENTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
+#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
+#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
+
+/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
+
+/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show current DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Image to load (0 - main or 1 - diagnostic) in the normal sequence */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* If non-zero, report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
+/* Length of DMA data (optional) */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
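
/*
 * Editorial illustration (not part of the generated header): filling in an
 * MC_CMD_AOE_IN_MAC_STATS request.  Flag bits are placed at their _LBN
 * positions in the CMD dword, the reporting period occupies bits [31:16]
 * (PERIOD_MS), and the 64-bit host DMA address is split across the LO/HI
 * dwords.  The function name is illustrative; the sketch assumes the
 * definitions above are in scope and a little-endian host.  The header
 * dword at MC_CMD_AOE_IN_CMD_OFST (0), carrying MC_CMD_AOE_OP_MAC_STATS in
 * its low 8 bits, is left to the caller.
 */
#include <stdint.h>
#include <string.h>

static void
aoe_mac_stats_encode(uint8_t *req, uint32_t port, uint64_t dma_addr,
		     uint32_t dma_len, uint16_t period_ms)
{
	uint32_t lo = (uint32_t)dma_addr;
	uint32_t hi = (uint32_t)(dma_addr >> 32);
	uint32_t cmd =
	    (1u << MC_CMD_AOE_IN_MAC_STATS_DMA_LBN) |
	    (1u << MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN) |
	    ((uint32_t)period_ms << MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN);

	memcpy(req + MC_CMD_AOE_IN_MAC_STATS_PORT_OFST, &port, 4);
	memcpy(req + MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST, &lo, 4);
	memcpy(req + MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST, &hi, 4);
	memcpy(req + MC_CMD_AOE_IN_MAC_STATS_CMD_OFST, &cmd, 4);
	memcpy(req + MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST, &dma_len, 4);
}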
+
+/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
+
+/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Enable or disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
+/* enum: Enable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
+/* enum: Disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
+
+/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port - specifies the port number when neither ALL_EXTERNAL nor
+ * ALL_INTERNAL is used
+ */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
+/* enum: Apply to all external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
+/* enum: Apply to all internal ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
+/* The MTU offset to be applied to the external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
+
+/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
+#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
+/* enum: AOE and associated external port */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
+/* enum: AOE and OR of all external ports */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
+/* enum: Individual ports */
+#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
+/* enum: Configure link state mode on given AOE port */
+#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
+/* enum: No-op */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
+/* enum: logical OR of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
+/* enum: logical AND of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
+
+/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
+#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
+/* enum: Statistics from Siena (default) */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
+#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
+/* enum: Statistics from the ASIC (default) */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_DDR msgrequest */
+#define MC_CMD_AOE_IN_DDR_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+/* Page index of SPD data */
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
+
+/* MC_CMD_AOE_IN_FC msgrequest */
+#define MC_CMD_AOE_IN_FC_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Basic commands for MC SPI Master emulation. */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
+/* enum: MC SPI read */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
+/* enum: MC SPI write */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
+
+/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
+#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* FC boot control flags */
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Random pieces of information */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application Started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values are for Modena code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
+/* enum: The second set of enum values are for Sorrento code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
+
+/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
+/* The value read from the CPLD */
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
+
+/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
+/* Failure counts for each fan */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
+
+/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
+/* Results of status command (only) */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
+
+/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
+#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
+
+/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
+#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
+
+/* MC_CMD_AOE_OUT_LOAD msgresponse */
+#define MC_CMD_AOE_OUT_LOAD_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
+#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
+ * for details
+ */
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
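+
+/* Illustrative sketch (not part of the generated MCDI definitions): reading
+ * one 64-bit counter out of the NO_DMA statistics block, which holds
+ * MC_CMD_MAC_NSTATS entries of 8 bytes each, low dword first. The helper name
+ * is hypothetical; a little-endian host is assumed, MCDI payloads being
+ * little-endian, and index must be below the STATISTICS_NUM count.
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static uint64_t aoe_mac_stat(const uint8_t *resp, unsigned int index)
+ *   {
+ *           const uint8_t *entry = resp +
+ *               MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN * index;
+ *           uint32_t lo, hi;
+ *
+ *           // each entry is stored as two dwords: low half then high half
+ *           memcpy(&lo, entry + MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST, 4);
+ *           memcpy(&hi, entry + MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST, 4);
+ *           return ((uint64_t)hi << 32) | lo;
+ *   }
+ */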
+
+/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
+/* Used to align the in and out data blocks so the MC can re-use the cmd */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
+/* out bytes */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
+#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR msgresponse */
+#define MC_CMD_AOE_OUT_DDR_LENMIN 17
+#define MC_CMD_AOE_OUT_DDR_LENMAX 252
+#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
+/* Information on the module. */
+#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
+/* Memory size, in MB. */
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
+/* The memory type, as reported from SPD information */
+#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
+/* Nominal voltage of the module (as applied) */
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
+/* SPD data read from the module */
+#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
+#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
+
+/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
+#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
+
+/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
+#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
+
+/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
+#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
+#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_FC msgresponse */
+#define MC_CMD_AOE_OUT_FC_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
+/* Flags describing status info on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
+/* DDR ECC status on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
+
+/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
+#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Apply a frequency adjustment to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
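+
+/* Illustrative sketch (not part of the generated MCDI definitions): one
+ * plausible way to encode a frequency adjustment given in parts per billion
+ * into the 40-bit fixed-point FREQ field, assuming the field carries the
+ * fractional rate offset with MC_CMD_PTP_IN_ADJUST_BITS fractional bits. The
+ * helper name is hypothetical and the encoding has not been verified against
+ * firmware behaviour.
+ *
+ *   #include <stdint.h>
+ *
+ *   static int64_t ptp_adjust_freq_encode(int64_t ppb)
+ *   {
+ *           // rate offset scaled by 2^40, i.e. ppb * 2^40 / 1e9; stays within
+ *           // int64 range for |ppb| up to roughly 8 million
+ *           return ppb * (1LL << MC_CMD_PTP_IN_ADJUST_BITS) / 1000000000LL;
+ *   }
+ */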
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue id to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
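+
+/* Illustrative sketch (not part of the generated MCDI definitions): packing
+ * the QUEUE dword above from an event queue ID, optionally setting the
+ * REPORT_SYNC_STATUS flag. The helper name is hypothetical.
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint32_t ptp_subscribe_queue_word(uint16_t evq, int want_sync_status)
+ *   {
+ *           uint32_t v = (uint32_t)evq <<
+ *               MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN;
+ *
+ *           if (want_sync_status)
+ *                   v |= 1u <<
+ *                       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN;
+ *           return v;
+ *   }
+ */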
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
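+
+/* Illustrative sketch (not part of the generated MCDI definitions):
+ * converting a timestamp "minor" value to nanoseconds for each of the formats
+ * above. The helper name is hypothetical.
+ *
+ *   #include <stdint.h>
+ *
+ *   static int64_t ptp_minor_to_ns(uint32_t format, uint32_t minor)
+ *   {
+ *           switch (format) {
+ *           case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS:
+ *                   return minor;                                 // already ns
+ *           case MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS:
+ *                   return (int64_t)minor * 8;                    // 8 ns per tick
+ *           case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION:
+ *                   return ((int64_t)minor * 1000000000LL) >> 27; // 2^-27 s per tick
+ *           default:
+ *                   return -1;                                    // unknown format
+ *           }
+ *   }
+ */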
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
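+
+/* Illustrative sketch (not part of the generated MCDI definitions): the
+ * corrected sync window described above, computed from one
+ * MC_CMD_PTP_OUT_SYNCHRONIZE timeset; SYNC_WINDOW_MIN gives the minimum
+ * acceptable value for this window. The helper name is hypothetical.
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint32_t ptp_corrected_sync_window(uint32_t hoststart,
+ *                                             uint32_t hostend,
+ *                                             uint32_t waitns)
+ *   {
+ *           // (host end - host start) minus the time the MC waited
+ *           return (hostend - hoststart) - waitns;
+ *   }
+ */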
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
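+
+/* Illustrative sketch (not part of the generated MCDI definitions): splitting
+ * an MC_CMD_CSR_READ32 response into the values read and the trailing status
+ * dword. The helper name is hypothetical; a little-endian host is assumed,
+ * MCDI payloads being little-endian, and resp_len must be at least LENMIN.
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static size_t csr_read32_parse(const uint8_t *resp, size_t resp_len,
+ *                                  uint32_t *values, uint32_t *status)
+ *   {
+ *           size_t nwords = resp_len / MC_CMD_CSR_READ32_OUT_BUFFER_LEN;
+ *           size_t i;
+ *
+ *           for (i = 0; i + 1 < nwords; i++)            // all but the last dword
+ *                   memcpy(&values[i], resp + 4 * i, 4);
+ *           memcpy(status, resp + 4 * (nwords - 1), 4); // last dword is status
+ *           return nwords - 1;                          // number of values read
+ *   }
+ */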
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
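+
+/* Illustrative sketch (not part of the generated MCDI definitions): building
+ * an MC_CMD_HP request that activates OCSD reporting to dma_addr every
+ * interval_s seconds; passing dma_addr == 0 and interval_s == 0 requests the
+ * "stop OCSD" sub-command instead, per the description above. The helper name
+ * is hypothetical and a little-endian host is assumed, MCDI payloads being
+ * little-endian.
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static void hp_ocsd_request(uint8_t req[MC_CMD_HP_IN_LEN],
+ *                               uint64_t dma_addr, uint32_t interval_s)
+ *   {
+ *           uint32_t subcmd = MC_CMD_HP_IN_OCSD_SUBCMD;
+ *           uint32_t lo = (uint32_t)dma_addr;
+ *           uint32_t hi = (uint32_t)(dma_addr >> 32);
+ *
+ *           memset(req, 0, MC_CMD_HP_IN_LEN);
+ *           memcpy(req + MC_CMD_HP_IN_SUBCMD_OFST, &subcmd, 4);
+ *           memcpy(req + MC_CMD_HP_IN_OCSD_ADDR_LO_OFST, &lo, 4);
+ *           memcpy(req + MC_CMD_HP_IN_OCSD_ADDR_HI_OFST, &hi, 4);
+ *           memcpy(req + MC_CMD_HP_IN_OCSD_INTERVAL_OFST, &interval_s, 4);
+ *   }
+ */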
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause 45 mode. If you want
+ * to use clause 22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
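+
+/* Illustrative sketch (not part of the generated MCDI definitions): filling
+ * in an MC_CMD_MDIO_READ request for a clause 45 read on the external bus and
+ * checking the response status. The helper names are hypothetical and a
+ * little-endian host is assumed, MCDI payloads being little-endian.
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static void mdio_read_request(uint8_t req[MC_CMD_MDIO_READ_IN_LEN],
+ *                                 uint32_t prtad, uint32_t devad, uint32_t addr)
+ *   {
+ *           uint32_t bus = MC_CMD_MDIO_BUS_EXTERNAL;
+ *
+ *           memcpy(req + MC_CMD_MDIO_READ_IN_BUS_OFST, &bus, 4);
+ *           memcpy(req + MC_CMD_MDIO_READ_IN_PRTAD_OFST, &prtad, 4);
+ *           memcpy(req + MC_CMD_MDIO_READ_IN_DEVAD_OFST, &devad, 4);
+ *           memcpy(req + MC_CMD_MDIO_READ_IN_ADDR_OFST, &addr, 4);
+ *   }
+ *
+ *   static int mdio_read_ok(const uint8_t resp[MC_CMD_MDIO_READ_OUT_LEN])
+ *   {
+ *           uint32_t status;
+ *
+ *           memcpy(&status, resp + MC_CMD_MDIO_READ_OUT_STATUS_OFST, 4);
+ *           return status == MC_CMD_MDIO_STATUS_GOOD; // DONE set, others clear
+ *   }
+ */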
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause 45 mode. If you want
+ * to use clause 22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
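+
+/* Illustrative sketch (not part of the generated MCDI definitions): packing
+ * the PARMS dword (offset 4) of one DBIWROP entry. The VF_NUM, VF_ACTIVE and
+ * CS2 LBN values are treated as bit positions within that dword; the helper
+ * name is hypothetical.
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint32_t dbiwrop_parms(uint16_t vf_num, int vf_active, int cs2)
+ *   {
+ *           uint32_t parms = (uint32_t)vf_num << MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN;
+ *
+ *           if (vf_active)
+ *                   parms |= 1u << MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN;
+ *           if (cs2)
+ *                   parms |= 1u << MC_CMD_DBIWROP_TYPEDEF_CS2_LBN;
+ *           return parms;
+ *   }
+ */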
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
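+
+/* Illustrative sketch (not part of the generated MCDI definitions): copying
+ * the 16-bit firmware subtype entries out of an MC_CMD_GET_BOARD_CFG response
+ * of resp_len bytes. The helper name is hypothetical and a little-endian host
+ * is assumed, MCDI payloads being little-endian.
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static size_t board_cfg_fw_subtypes(const uint8_t *resp, size_t resp_len,
+ *                                       uint16_t *subtypes, size_t max)
+ *   {
+ *           size_t n = (resp_len - MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST) /
+ *                      MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN;
+ *           size_t i;
+ *
+ *           if (n > max)
+ *                   n = max;
+ *           for (i = 0; i < n; i++)
+ *                   memcpy(&subtypes[i],
+ *                          resp + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
+ *                          MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN * i, 2);
+ *           return n;
+ *   }
+ */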
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters
+ * (offset 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16-byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
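+
+/* Illustrative sketch (not part of the generated MCDI definitions): building
+ * a request that attaches the driver and asks for the full-featured datapath
+ * firmware; as noted above, the capabilities actually granted must still be
+ * read back with MC_CMD_GET_CAPABILITIES. The helper name is hypothetical and
+ * a little-endian host is assumed, MCDI payloads being little-endian.
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static void drv_attach_request(uint8_t req[MC_CMD_DRV_ATTACH_IN_LEN])
+ *   {
+ *           uint32_t new_state = 1u << MC_CMD_DRV_ATTACH_LBN; // attach, no preboot
+ *           uint32_t update = 1;                              // apply NEW_STATE
+ *           uint32_t fw_id = MC_CMD_FW_FULL_FEATURED;
+ *
+ *           memcpy(req + MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST, &new_state, 4);
+ *           memcpy(req + MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, &update, 4);
+ *           memcpy(req + MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST, &fw_id, 4);
+ *   }
+ */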
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
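+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): how a driver
+ * might build an MC_CMD_DRV_ATTACH request from the offsets and bit
+ * positions above. mcdi_put_u32() and mcdi_rpc() are hypothetical stand-ins
+ * for driver-provided little-endian accessors and MCDI transport; they are
+ * not part of this header. MCDI payload fields are little-endian dwords.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+extern void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val);
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+
+static int
+drv_attach(int attach)
+{
+	uint8_t inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
+	uint8_t outbuf[MC_CMD_DRV_ATTACH_EXT_OUT_LEN];
+
+	memset(inbuf, 0, sizeof (inbuf));
+	/* NEW_STATE: bit 0 (MC_CMD_DRV_ATTACH_LBN) marks the driver active. */
+	mcdi_put_u32(inbuf, MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST,
+	    attach ? (1u << MC_CMD_DRV_ATTACH_LBN) : 0);
+	/* UPDATE=1 applies NEW_STATE; UPDATE=0 only reports the current state. */
+	mcdi_put_u32(inbuf, MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, 1);
+	/* Ask for the full-featured datapath firmware. This is only a
+	 * preference and older platforms ignore FIRMWARE_ID entirely, so the
+	 * driver must still issue MC_CMD_GET_CAPABILITIES afterwards. */
+	mcdi_put_u32(inbuf, MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST,
+	    MC_CMD_FW_FULL_FEATURED);
+
+	return (mcdi_rpc(MC_CMD_DRV_ATTACH, inbuf, sizeof (inbuf),
+	    outbuf, sizeof (outbuf), NULL));
+}
+#endif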
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to a circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
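+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): a variable-
+ * length MC_CMD_PUTS request that routes a string to the UART. Uses the
+ * same hypothetical mcdi_put_u32()/mcdi_rpc() stand-ins as the earlier
+ * sketches; they are not part of this header.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+extern void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val);
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+
+static int
+mc_puts_uart(const char *str)
+{
+	uint8_t inbuf[MC_CMD_PUTS_IN_LENMAX];
+	size_t len = strlen(str) + 1;		/* send the NUL as well */
+
+	if (len > MC_CMD_PUTS_IN_STRING_MAXNUM)
+		return (-1);
+
+	memset(inbuf, 0, sizeof (inbuf));
+	/* DEST: route to the UART only (bit 0), not the network port. */
+	mcdi_put_u32(inbuf, MC_CMD_PUTS_IN_DEST_OFST,
+	    1u << MC_CMD_PUTS_IN_UART_LBN);
+	memcpy(inbuf + MC_CMD_PUTS_IN_STRING_OFST, str, len);
+
+	/* Request length follows the LEN(num) convention: 12 fixed bytes
+	 * plus one byte per string element. */
+	return (mcdi_rpc(MC_CMD_PUTS, inbuf, MC_CMD_PUTS_IN_LEN(len),
+	    NULL, MC_CMD_PUTS_OUT_LEN, NULL));
+}
+#endif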
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
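+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): decoding the
+ * SUPPORTED_CAP bitmask returned by MC_CMD_GET_PHY_CFG using the _LBN/_WIDTH
+ * definitions above. mcdi_rpc() and mcdi_get_u32() are the same hypothetical
+ * stand-ins used in the earlier sketches, and MCDI_FIELD() is defined here
+ * only for this example.
+ */
+#if 0
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+extern uint32_t mcdi_get_u32(const uint8_t *buf, unsigned int ofst);
+
+/* Extract a bitfield described by its LBN (low bit number) and WIDTH. */
+#define MCDI_FIELD(dword, name) \
+	(((dword) >> name ## _LBN) & ((1u << name ## _WIDTH) - 1u))
+
+static int
+phy_supports_10g_an(bool *supportedp)
+{
+	uint8_t outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+	uint32_t cap;
+	int rc;
+
+	rc = mcdi_rpc(MC_CMD_GET_PHY_CFG, NULL, MC_CMD_GET_PHY_CFG_IN_LEN,
+	    outbuf, sizeof (outbuf), NULL);
+	if (rc != 0)
+		return (rc);
+
+	cap = mcdi_get_u32(outbuf, MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST);
+	*supportedp = MCDI_FIELD(cap, MC_CMD_PHY_CAP_10000FDX) &&
+	    MCDI_FIELD(cap, MC_CMD_PHY_CAP_AN);
+	return (0);
+}
+#endif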
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail
+ * result in OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return
+ * code: 0, EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
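+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): starting a PHY
+ * BIST with MC_CMD_START_BIST and polling MC_CMD_POLL_BIST until RESULT
+ * leaves the RUNNING state, treating the pass/fail verdict in RESULT as
+ * authoritative as described above. mcdi_put_u32(), mcdi_get_u32() and
+ * mcdi_rpc() are the same hypothetical stand-ins used in the earlier
+ * sketches; sleep_ms() is likewise a stand-in for a platform delay.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+extern void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val);
+extern uint32_t mcdi_get_u32(const uint8_t *buf, unsigned int ofst);
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+extern void sleep_ms(unsigned int ms);
+
+static int
+run_phy_bist(void)
+{
+	uint8_t inbuf[MC_CMD_START_BIST_IN_LEN];
+	uint8_t outbuf[MC_CMD_POLL_BIST_OUT_LEN];
+	uint32_t result;
+	int rc;
+
+	memset(inbuf, 0, sizeof (inbuf));
+	mcdi_put_u32(inbuf, MC_CMD_START_BIST_IN_TYPE_OFST, MC_CMD_PHY_BIST);
+	rc = mcdi_rpc(MC_CMD_START_BIST, inbuf, sizeof (inbuf),
+	    NULL, MC_CMD_START_BIST_OUT_LEN, NULL);
+	if (rc != 0)
+		return (rc);
+
+	do {
+		sleep_ms(100);
+		rc = mcdi_rpc(MC_CMD_POLL_BIST, NULL, MC_CMD_POLL_BIST_IN_LEN,
+		    outbuf, sizeof (outbuf), NULL);
+		if (rc != 0)
+			return (rc);
+		result = mcdi_get_u32(outbuf,
+		    MC_CMD_POLL_BIST_OUT_RESULT_OFST);
+	} while (result == MC_CMD_POLL_BIST_RUNNING);
+
+	/* Whatever PHY-specific output follows, the pass/fail verdict in
+	 * RESULT decides the outcome. */
+	return (result == MC_CMD_POLL_BIST_PASSED ? 0 : -1);
+}
+#endif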
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than via
+ * directly writing FLUSH_CMD.
+ *
+ * The flush completes asynchronously (either done or failed) after this
+ * command returns. The driver must still wait for the flush done/failure
+ * events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
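+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): requesting an
+ * asynchronous flush of a set of RX queues. The command only initiates the
+ * flushes; completion is reported via the usual flush done/failure events.
+ * mcdi_put_u32() and mcdi_rpc() are the same hypothetical stand-ins used in
+ * the earlier sketches.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+
+extern void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val);
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+
+static int
+flush_rx_queues(const uint32_t *qids, unsigned int nqids)
+{
+	uint8_t inbuf[MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX];
+	unsigned int i;
+
+	if (nqids < MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM ||
+	    nqids > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM)
+		return (-1);
+
+	for (i = 0; i < nqids; i++)
+		mcdi_put_u32(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST +
+		    i * MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN, qids[i]);
+
+	return (mcdi_rpc(MC_CMD_FLUSH_RX_QUEUES, inbuf,
+	    MC_CMD_FLUSH_RX_QUEUES_IN_LEN(nqids),
+	    NULL, MC_CMD_FLUSH_RX_QUEUES_OUT_LEN, NULL));
+}
+#endif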
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
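+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): reading the
+ * unified link state and testing the LINK_UP flag via its _LBN definition
+ * above. mcdi_rpc() and mcdi_get_u32() are the same hypothetical stand-ins
+ * used in the earlier sketches.
+ */
+#if 0
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+extern uint32_t mcdi_get_u32(const uint8_t *buf, unsigned int ofst);
+
+static int
+get_link_state(bool *upp, uint32_t *speed_mbps)
+{
+	uint8_t outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	uint32_t flags;
+	int rc;
+
+	rc = mcdi_rpc(MC_CMD_GET_LINK, NULL, MC_CMD_GET_LINK_IN_LEN,
+	    outbuf, sizeof (outbuf), NULL);
+	if (rc != 0)
+		return (rc);
+
+	flags = mcdi_get_u32(outbuf, MC_CMD_GET_LINK_OUT_FLAGS_OFST);
+	*upp = (flags >> MC_CMD_GET_LINK_OUT_LINK_UP_LBN) & 1u;
+	/* Note: LINK_SPEED may read non-zero even while the link is down. */
+	*speed_mbps = mcdi_get_u32(outbuf, MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST);
+	return (0);
+}
+#endif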
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
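+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): using the
+ * SET_MAC_EXT CONTROL field to change only the MTU and reading the applied
+ * MTU back from the V2 response. A real driver would first check that the
+ * firmware advertises SET_MAC_ENHANCED, since otherwise CONTROL is ignored
+ * and every field of the request is applied. mcdi_put_u32(), mcdi_get_u32()
+ * and mcdi_rpc() are the same hypothetical stand-ins used in the earlier
+ * sketches.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+extern void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val);
+extern uint32_t mcdi_get_u32(const uint8_t *buf, unsigned int ofst);
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+
+static int
+set_mac_mtu(uint32_t mtu, uint32_t *applied_mtup)
+{
+	uint8_t inbuf[MC_CMD_SET_MAC_EXT_IN_LEN];
+	uint8_t outbuf[MC_CMD_SET_MAC_V2_OUT_LEN];
+	int rc;
+
+	memset(inbuf, 0, sizeof (inbuf));
+	mcdi_put_u32(inbuf, MC_CMD_SET_MAC_EXT_IN_MTU_OFST, mtu);
+	/* CONTROL selects which parameters to apply; here only CFG_MTU.
+	 * With CONTROL set to 0 the command would merely report the MTU. */
+	mcdi_put_u32(inbuf, MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST,
+	    1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN);
+
+	rc = mcdi_rpc(MC_CMD_SET_MAC, inbuf, sizeof (inbuf),
+	    outbuf, sizeof (outbuf), NULL);
+	if (rc != 0)
+		return (rc);
+
+	*applied_mtup = mcdi_get_u32(outbuf, MC_CMD_SET_MAC_V2_OUT_MTU_OFST);
+	return (0);
+}
+#endif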
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumerate). Each value is represented
+ * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, then the
+ * statistics are dmad to that (page-aligned location). Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
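+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): collecting PHY
+ * statistics without a DMA buffer (DMA_ADDR = 0), so the values come back in
+ * the response as an array of 32-bit counters indexed by the enumeration
+ * above. mcdi_put_u32(), mcdi_get_u32() and mcdi_rpc() are the same
+ * hypothetical stand-ins used in the earlier sketches.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+extern void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val);
+extern uint32_t mcdi_get_u32(const uint8_t *buf, unsigned int ofst);
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+
+static int
+phy_stat_read(unsigned int stat_index, uint32_t *valuep)
+{
+	uint8_t inbuf[MC_CMD_PHY_STATS_IN_LEN];
+	uint8_t outbuf[MC_CMD_PHY_STATS_OUT_NO_DMA_LEN];
+	int rc;
+
+	if (stat_index >= MC_CMD_PHY_NSTATS)
+		return (-1);
+
+	/* DMA_ADDR = 0 selects the "no DMA" flavour of the response. */
+	memset(inbuf, 0, sizeof (inbuf));
+	rc = mcdi_rpc(MC_CMD_PHY_STATS, inbuf, sizeof (inbuf),
+	    outbuf, sizeof (outbuf), NULL);
+	if (rc != 0)
+		return (rc);
+
+	*valuep = mcdi_get_u32(outbuf,
+	    MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST +
+	    stat_index * MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN);
+	return (0);
+}
+#endif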
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero-initialise the buffer to
+ * guarantee consistent results. If DMA_ADDR is 0, then no DMA is performed
+ * and the statistics may be read from the message response. If DMA_ADDR != 0,
+ * then the statistics are DMAed to that (page-aligned) location.
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
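+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): reading one
+ * 64-bit counter out of a MAC statistics buffer laid out as above
+ * (MC_CMD_MAC_NSTATS little-endian 64-bit values). The GENERATION_START and
+ * GENERATION_END entries are assumed here to be usable as a consistency
+ * check: if they differ, the MC was rewriting the buffer and the caller
+ * should retry (memory barriers between the reads are omitted for brevity).
+ * mcdi_get_u64() is a hypothetical little-endian 64-bit read helper and
+ * MAC_STAT_OFST() is defined only for this example.
+ */
+#if 0
+#include <stdint.h>
+
+extern uint64_t mcdi_get_u64(const uint8_t *buf, unsigned int ofst);
+
+/* Byte offset of a statistic within the 64-bit-per-entry buffer. */
+#define MAC_STAT_OFST(index) \
+	((index) * MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN)
+
+static int
+mac_stat_read(const uint8_t *stats_buf, unsigned int stat_index,
+    uint64_t *valuep)
+{
+	uint64_t gen_start, gen_end;
+
+	if (stat_index >= MC_CMD_MAC_NSTATS)
+		return (-1);
+
+	/* Read the end marker first, then the value, then the start marker. */
+	gen_end = mcdi_get_u64(stats_buf,
+	    MAC_STAT_OFST(MC_CMD_MAC_GENERATION_END));
+	*valuep = mcdi_get_u64(stats_buf, MAC_STAT_OFST(stat_index));
+	gen_start = mcdi_get_u64(stats_buf,
+	    MAC_STAT_OFST(MC_CMD_MAC_GENERATION_START));
+
+	/* A mismatch means the MC updated the buffer mid-read; retry. */
+	return (gen_start == gen_end ? 0 : -1);
+}
+#endif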
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * requests record, which is ignored in all but the first structure)
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
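+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): a single-record
+ * MC_CMD_MEMCPY request whose source data is embedded in the command
+ * (FROM_RID = RID_INLINE) so that no DMA read is needed, as described above.
+ * mcdi_put_u32() and mcdi_rpc() are the same hypothetical stand-ins used in
+ * the earlier sketches.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+extern void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val);
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
+    uint8_t *out, size_t out_len, size_t *out_len_usedp);
+
+static int
+memcpy_inline(uint32_t to_rid, uint64_t to_addr, const void *data,
+    uint32_t length)
+{
+	/* One 32-byte record followed by the inline payload. */
+	uint8_t inbuf[MC_CMD_MEMCPY_IN_LENMAX];
+	const unsigned int data_ofst = MC_CMD_MEMCPY_IN_LEN(1);
+
+	if (data_ofst + length > sizeof (inbuf))
+		return (-1);
+
+	memset(inbuf, 0, sizeof (inbuf));
+	mcdi_put_u32(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST, 1);
+	mcdi_put_u32(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST, to_rid);
+	mcdi_put_u32(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST,
+	    (uint32_t)to_addr);
+	mcdi_put_u32(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST,
+	    (uint32_t)(to_addr >> 32));
+	/* Inline source: FROM_RID = RID_INLINE, FROM_ADDR_HI = 0 (from the
+	 * memset) and FROM_ADDR_LO = offset of the embedded data within the
+	 * payload, which must not overlap the records. */
+	mcdi_put_u32(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST,
+	    MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
+	mcdi_put_u32(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST,
+	    data_ofst);
+	mcdi_put_u32(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST, length);
+	memcpy(inbuf + data_ofst, data, length);
+
+	return (mcdi_rpc(MC_CMD_MEMCPY, inbuf, data_ofst + length,
+	    NULL, MC_CMD_MEMCPY_OUT_LEN, NULL));
+}
+#endif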
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+
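+/*
+ * Usage sketch (not part of the MCDI definitions above): the A/B
+ * read-modify-write-verify flow described for MC_CMD_NVRAM_READ_IN_V2. This
+ * and the other sketches in this file are illustrative only and sit behind a
+ * hypothetical MCDI_USAGE_SKETCHES guard so they never affect normal builds;
+ * mcdi_rpc() is an assumed transport helper (a real driver would use its own
+ * MCDI plumbing), and the little-endian accessors below are minimal stand-ins
+ * shared by the later sketches.
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Hypothetical transport helper: issue MCDI command `cmd` with the given
+ * request payload and return 0 on success, with the response copied into
+ * `outbuf` (which may be NULL if no payload is expected) and its actual
+ * length stored in *outlen_actual.
+ */
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen, size_t *outlen_actual);
+
+/* MCDI payload fields are little-endian dwords; minimal accessors. */
+static inline void mcdi_put_dword(uint8_t *buf, size_t ofst, uint32_t value)
+{
+	buf[ofst + 0] = (uint8_t)(value >> 0);
+	buf[ofst + 1] = (uint8_t)(value >> 8);
+	buf[ofst + 2] = (uint8_t)(value >> 16);
+	buf[ofst + 3] = (uint8_t)(value >> 24);
+}
+
+static inline uint32_t mcdi_get_dword(const uint8_t *buf, size_t ofst)
+{
+	return ((uint32_t)buf[ofst + 0] << 0) |
+	       ((uint32_t)buf[ofst + 1] << 8) |
+	       ((uint32_t)buf[ofst + 2] << 16) |
+	       ((uint32_t)buf[ofst + 3] << 24);
+}
+
+/* Verify step of the read-modify-write-verify sequence: with the write lock
+ * still held (i.e. after MC_CMD_NVRAM_UPDATE_START and the erase/write), read
+ * back up to 252 bytes from the partition being updated by selecting
+ * MODE=TARGET_BACKUP.
+ */
+static int nvram_verify_read_backup(uint32_t type, uint32_t offset,
+				    uint32_t length, uint8_t *data)
+{
+	uint8_t inbuf[MC_CMD_NVRAM_READ_IN_V2_LEN];
+	uint8_t outbuf[MC_CMD_NVRAM_READ_OUT_LENMAX];
+	size_t outlen = 0;
+	int rc;
+
+	mcdi_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST, type);
+	mcdi_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST, offset);
+	mcdi_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST, length);
+	mcdi_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_MODE_OFST,
+		       MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);
+
+	rc = mcdi_rpc(MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+		      outbuf, sizeof(outbuf), &outlen);
+	if (rc != 0 || outlen < length)
+		return -1;
+
+	memcpy(data, &outbuf[MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST], length);
+	return 0;
+}
+
+#endif /* MCDI_USAGE_SKETCHES */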
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. Use paired with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for Medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to exceed the
+ * MCDI timeout. Signature verification is therefore initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, but the MCDI
+ * command is run in a background MCDI processing thread. This response payload
+ * includes the result of the signature verification. Note that the
+ * per-partition NVRAM lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+
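+/*
+ * Usage sketch (illustrative only; reuses the hypothetical mcdi_rpc() and
+ * little-endian helpers from the NVRAM_READ sketch above): finish an update
+ * that was started with NVRAM_UPDATE_START_V2_IN, asking the firmware to
+ * report the verify result, and map RESULT_CODE onto a simple pass/fail.
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+/* Returns 0 if the new partition contents were accepted, -1 otherwise. */
+static int nvram_update_finish_v2(uint32_t type, uint32_t reboot)
+{
+	uint8_t inbuf[MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN];
+	uint8_t outbuf[MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN];
+	size_t outlen = 0;
+	uint32_t result;
+	int rc;
+
+	mcdi_put_dword(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST, type);
+	mcdi_put_dword(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST, reboot);
+	/* REPORT_VERIFY_RESULT is bit 0 of the FLAGS field. */
+	mcdi_put_dword(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST,
+	    1u << MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN);
+
+	rc = mcdi_rpc(MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+		      outbuf, sizeof(outbuf), &outlen);
+	if (rc != 0)
+		return -1;
+	if (outlen < MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)
+		return 0;	/* Legacy response: no verify result reported. */
+
+	result = mcdi_get_dword(outbuf,
+	    MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST);
+	/* RC_UNKNOWN means the firmware did not report a result. */
+	if (result == MC_CMD_NVRAM_VERIFY_RC_UNKNOWN)
+		return 0;
+	return (result == MC_CMD_NVRAM_VERIFY_RC_SUCCESS) ? 0 : -1;
+}
+
+#endif /* MCDI_USAGE_SKETCHES */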
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production MC firmware builds are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmware built with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
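+/*
+ * Usage sketch (illustrative only; reuses the hypothetical helpers defined in
+ * the NVRAM_READ sketch above): request an MC reboot after the driver has
+ * detected an assertion failure. Because a successful reboot is reported as a
+ * response with ERR=1 and DATALEN=0, the transport's error return is expected
+ * here and is deliberately ignored.
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+static void mc_reboot_after_assertion(void)
+{
+	uint8_t inbuf[MC_CMD_REBOOT_IN_LEN];
+	size_t outlen = 0;
+
+	mcdi_put_dword(inbuf, MC_CMD_REBOOT_IN_FLAGS_OFST,
+		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+	(void)mcdi_rpc(MC_CMD_REBOOT, inbuf, sizeof(inbuf), NULL, 0, &outlen);
+}
+
+#endif /* MCDI_USAGE_SKETCHES */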
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next-page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature sensor voltage on slave core (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature sensor voltage on slave core (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
+
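+/*
+ * Usage sketch (illustrative only; reuses the hypothetical helpers defined in
+ * the NVRAM_READ sketch above): walk every page of sensor information via
+ * MC_CMD_SENSOR_INFO_EXT_IN. Each page returns a 32-bit mask followed by one
+ * 8-byte MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF per implemented sensor; bit 31 of
+ * the mask is the next-page flag rather than a sensor.
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+/* Count the implemented sensors across all pages. Returns 0 on success. */
+static int sensor_info_count_all(unsigned int *count_out)
+{
+	uint8_t inbuf[MC_CMD_SENSOR_INFO_EXT_IN_LEN];
+	uint8_t outbuf[MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX];
+	unsigned int count = 0;
+	unsigned int bit;
+	uint32_t page = 0;
+	uint32_t mask;
+	size_t outlen = 0;
+	int rc;
+
+	do {
+		mcdi_put_dword(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST, page);
+		rc = mcdi_rpc(MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+			      outbuf, sizeof(outbuf), &outlen);
+		if (rc != 0 || outlen < MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN)
+			return -1;
+
+		mask = mcdi_get_dword(outbuf,
+				      MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST);
+		/* Count implemented sensors, excluding the next-page bit. */
+		for (bit = 0; bit < MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN; bit++)
+			if (mask & (1u << bit))
+				count++;
+		page++;
+	} while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
+
+	*count_out = count;
+	return 0;
+}
+
+#endif /* MCDI_USAGE_SKETCHES */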
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than LENGTH allows, EINVAL is returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
+
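+/*
+ * Usage sketch (illustrative only; reuses the hypothetical mcdi_get_dword()
+ * helper from the NVRAM_READ sketch above): decode one
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword from the host DMA buffer filled by
+ * MC_CMD_READ_SENSORS, and decide whether the board is still in a usable
+ * state (STATE_OK or STATE_WARNING).
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+struct sensor_reading {
+	uint16_t value;	/* VALUE field, bits 0..15 */
+	uint8_t state;	/* STATE field, bits 16..23 */
+	uint8_t type;	/* TYPE field, bits 24..31 (MC_CMD_SENSOR_INFO enum) */
+};
+
+static void sensor_entry_decode(const uint8_t *dma_buf, unsigned int index,
+				struct sensor_reading *out)
+{
+	uint32_t dword = mcdi_get_dword(dma_buf,
+	    index * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN);
+
+	out->value = (uint16_t)(dword >>
+	    MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN);
+	out->state = (uint8_t)(dword >>
+	    MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN);
+	out->type = (uint8_t)(dword >>
+	    MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN);
+}
+
+static int sensor_reading_is_healthy(const struct sensor_reading *r)
+{
+	return r->state == MC_CMD_SENSOR_STATE_OK ||
+	       r->state == MC_CMD_SENSOR_STATE_WARNING;
+}
+
+#endif /* MCDI_USAGE_SKETCHES */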
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug 35017 workaround (A64 tables must be an identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
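+/*
+ * Usage sketch (illustrative only; reuses the hypothetical helpers defined in
+ * the NVRAM_READ sketch above, and additionally assumes that mcdi_rpc()
+ * reports MCDI failures as positive errno-style codes): try to enable a
+ * workaround, treating "firmware does not know this workaround number"
+ * (EINVAL) as a soft failure rather than a hard error, as the comment above
+ * recommends.
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+#include <errno.h>
+
+/* Returns 1 if the workaround was enabled, 0 if the firmware does not
+ * understand it, -1 on any other error.
+ * Example: workaround_try_enable(MC_CMD_WORKAROUND_BUG35388);
+ */
+static int workaround_try_enable(uint32_t type)
+{
+	uint8_t inbuf[MC_CMD_WORKAROUND_IN_LEN];
+	size_t outlen = 0;
+	int rc;
+
+	mcdi_put_dword(inbuf, MC_CMD_WORKAROUND_IN_TYPE_OFST, type);
+	mcdi_put_dword(inbuf, MC_CMD_WORKAROUND_IN_ENABLED_OFST, 1);
+
+	rc = mcdi_rpc(MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), NULL, 0, &outlen);
+	if (rc == 0)
+		return 1;
+	if (rc == EINVAL)
+		return 0;
+	return -1;
+}
+
+#endif /* MCDI_USAGE_SKETCHES */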
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+
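+/*
+ * Usage sketch (illustrative only; reuses the hypothetical helpers defined in
+ * the NVRAM_READ sketch above): for an SFP+ module, read PAGE 0 and PAGE 1 to
+ * assemble the 256 bytes of module ID data exposed at I2C address 0xA0
+ * (offsets 0x00-0x7f and 0x80-0xff respectively).
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+static int sfp_read_module_id(uint8_t data[256])
+{
+	uint8_t inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+	uint8_t outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
+	uint32_t page;
+	size_t outlen = 0;
+	int rc;
+
+	for (page = 0; page < 2; page++) {
+		mcdi_put_dword(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST,
+			       page);
+		rc = mcdi_rpc(MC_CMD_GET_PHY_MEDIA_INFO, inbuf, sizeof(inbuf),
+			      outbuf, sizeof(outbuf), &outlen);
+		if (rc != 0)
+			return -1;
+		/* Each SFP+ page is a 128-byte block; DATALEN reports it. */
+		if (mcdi_get_dword(outbuf,
+		    MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST) < 128)
+			return -1;
+		memcpy(&data[page * 128],
+		       &outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST], 128);
+	}
+	return 0;
+}
+
+#endif /* MCDI_USAGE_SKETCHES */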
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
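+/*
+ * Usage sketch (illustrative only; reuses the hypothetical helpers defined in
+ * the NVRAM_READ sketch above): the MUM sub-operation is carried in the low
+ * OP_WIDTH (8) bits of the 32-bit header dword at OP_HDR_OFST; here a NULL
+ * MUM command is built that way.
+ */
+#ifdef MCDI_USAGE_SKETCHES
+
+static int mum_null_cmd(void)
+{
+	uint8_t inbuf[MC_CMD_MUM_IN_LEN];
+	size_t outlen = 0;
+
+	/* Pack the sub-op into bits OP_LBN..OP_LBN+OP_WIDTH-1 of the header. */
+	mcdi_put_dword(inbuf, MC_CMD_MUM_IN_OP_HDR_OFST,
+		       (uint32_t)MC_CMD_MUM_OP_NULL << MC_CMD_MUM_IN_OP_LBN);
+
+	return mcdi_rpc(MC_CMD_MUM, inbuf, sizeof(inbuf), NULL, 0, &outlen);
+}
+
+#endif /* MCDI_USAGE_SKETCHES */
+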
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: 1.8V. Values 5-15 are reserved for future usage. */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present, supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but cannot be contacted over I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
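+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): one way a
+ * driver might pull the bit-fields out of a 64-bit SODIMM info record
+ * returned by MC_CMD_MUM_OUT_READ_DDR_INFO, using the _LBN/_WIDTH pairs
+ * defined above. The helper names are hypothetical and only assume that the
+ * record has already been read out as a little-endian 64-bit value.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+ddr_info_field(uint64_t record, unsigned int lbn, unsigned int width)
+{
+    /* Extract WIDTH bits starting at lowest bit number LBN. */
+    return (uint32_t)((record >> lbn) & ((1ULL << width) - 1));
+}
+
+static inline void
+ddr_info_decode(uint64_t record, uint32_t *bankp, uint32_t *statep)
+{
+    /* e.g. MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 or _BANK2 */
+    *bankp = ddr_info_field(record,
+        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN,
+        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH);
+    /* e.g. MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED */
+    *statep = ddr_info_field(record,
+        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN,
+        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH);
+}
+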
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
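+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): packing
+ * an EVB_VLAN_TAG value from its VLAN_ID and MODE bit-fields. The helper
+ * name is hypothetical; it simply follows the _LBN/_WIDTH encoding above.
+ */
+#include <stdint.h>
+
+static inline uint16_t
+evb_vlan_tag_encode(uint16_t vlan_id, uint16_t mode)
+{
+    uint16_t tag = 0;
+
+    /* 12-bit VLAN ID in bits 0..11 */
+    tag |= (vlan_id & ((1U << EVB_VLAN_TAG_VLAN_ID_WIDTH) - 1)) <<
+        EVB_VLAN_TAG_VLAN_ID_LBN;
+    /* 4-bit mode (e.g. EVB_VLAN_TAG_INSERT or EVB_VLAN_TAG_REPLACE) */
+    tag |= (mode & ((1U << EVB_VLAN_TAG_MODE_WIDTH) - 1)) <<
+        EVB_VLAN_TAG_MODE_LBN;
+    return tag;
+}
+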
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Spare partition 0 */
+#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Used for XIP code of shmbooted images */
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a
+ * TX timestamp.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
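+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): decoding
+ * the fields above from a TX event. Each timestamp-type event carries a
+ * 32-bit portion of the timestamp, split across the DATA_LO/DATA_HI fields;
+ * a TSTAMP_LO event and a TSTAMP_HI event together describe one TX
+ * timestamp. The helper names and the uint64_t event representation are
+ * assumptions for the example only.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+tx_ev_field(uint64_t event, unsigned int lbn, unsigned int width)
+{
+    return (uint32_t)((event >> lbn) & ((1ULL << width) - 1));
+}
+
+static inline int
+tx_ev_is_timestamp(uint64_t event)
+{
+    uint32_t type = tx_ev_field(event, TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN,
+        TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH);
+
+    return (type == TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO ||
+        type == TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI);
+}
+
+static inline uint32_t
+tx_ev_tstamp_part(uint64_t event)
+{
+    /* 32-bit timestamp portion: upper 16 bits from DATA_HI, lower from DATA_LO */
+    return (tx_ev_field(event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN,
+        TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH) << 16) |
+        tx_ev_field(event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN,
+        TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH);
+}
+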
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 - 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
+
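+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): building
+ * a 4-bit RSS hash selector from the single-bit fields defined above. The
+ * helper name is hypothetical.
+ */
+static inline unsigned int
+rss_mode_selector(int src_addr, int dst_addr, int src_port, int dst_port)
+{
+    unsigned int selector = 0;
+
+    if (src_addr)
+        selector |= 1U << RSS_MODE_HASH_SRC_ADDR_LBN;
+    if (dst_addr)
+        selector |= 1U << RSS_MODE_HASH_DST_ADDR_LBN;
+    if (src_port)
+        selector |= 1U << RSS_MODE_HASH_SRC_PORT_LBN;
+    if (dst_port)
+        selector |= 1U << RSS_MODE_HASH_DST_PORT_LBN;
+    /* 0 disables RSS spreading for the packet type; 0xf is a 4-tuple hash */
+    return selector;
+}
+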
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if the INTERRUPTING flag was set */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
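+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): laying
+ * out a minimal MC_CMD_INIT_EVQ request in a byte buffer using the offset
+ * definitions above. MCDI payloads are little-endian dwords; the
+ * mcdi_wr_dword helper and the fixed choice of flags/modes are assumptions
+ * for the example only, not the driver's actual request-building path.
+ */
+#include <stddef.h>
+#include <stdint.h>
+
+static inline void
+mcdi_wr_dword(uint8_t *buf, size_t ofst, uint32_t value)
+{
+    buf[ofst + 0] = (uint8_t)(value & 0xff);
+    buf[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
+    buf[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
+    buf[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
+}
+
+static inline size_t
+init_evq_request(uint8_t *buf, uint32_t nentries, uint32_t instance,
+    const uint64_t *dma_addrs, unsigned int naddrs)
+{
+    unsigned int i;
+
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_SIZE_OFST, nentries);
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_INSTANCE_OFST, instance);
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST, 0);
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST, 0);
+    /* Example only: non-interrupting queue with event cut-through enabled */
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_FLAGS_OFST,
+        1U << MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN);
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST,
+        MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST, 0);
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST,
+        MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+    mcdi_wr_dword(buf, MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST, 0);
+
+    /* One 64-bit DMA address per 4k of host memory backing the EVQ */
+    for (i = 0; i < naddrs; i++) {
+        size_t ofst = MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST +
+            i * MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN;
+        mcdi_wr_dword(buf, ofst, (uint32_t)(dma_addrs[i] & 0xffffffff));
+        mcdi_wr_dword(buf, ofst + 4, (uint32_t)(dma_addrs[i] >> 32));
+    }
+
+    return MC_CMD_INIT_EVQ_IN_LEN(naddrs);
+}
+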
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be overridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if the INTERRUPTING flag was set */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
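+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): how the
+ * FLAGS dword of an MC_CMD_INIT_RXQ request might be assembled from the
+ * single-bit flags and the 4-bit CRC_MODE field defined above. The helper
+ * name is hypothetical; the flag choices are arbitrary example values.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+init_rxq_flags(int buff_mode, int disable_scatter)
+{
+    uint32_t flags = 0;
+
+    if (buff_mode)
+        flags |= 1U << MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN;
+    if (disable_scatter)
+        flags |= 1U << MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN;
+    /* 4-bit CRC mode field: no CRC offload in this example */
+    flags |= (uint32_t)QUEUE_CRC_MODE_NONE <<
+        MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN;
+    return flags;
+}
+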
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
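+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): the
+ * teardown order implied by MC_CMD_FINI_EVQ. Every RXQ and TXQ pointing at
+ * an EVQ must be finalised before the EVQ itself, otherwise FINI_EVQ fails
+ * with EBUSY. The mcdi_exec() helper below is hypothetical; it stands in
+ * for whatever MCDI transport the driver uses.
+ */
+#include <stdint.h>
+
+/* Hypothetical transport: issue command CMD with a single dword argument. */
+extern int mcdi_exec(unsigned int cmd, uint32_t arg);
+
+static inline int
+teardown_queues(uint32_t rxq, uint32_t txq, uint32_t evq)
+{
+    int rc;
+
+    if ((rc = mcdi_exec(MC_CMD_FINI_RXQ, rxq)) != 0)
+        return rc;
+    if ((rc = mcdi_exec(MC_CMD_FINI_TXQ, txq)) != 0)
+        return rc;
+    /* Only now is it safe to tear down the EVQ they pointed at. */
+    return mcdi_exec(MC_CMD_FINI_EVQ, evq);
+}
+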
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
+
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
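+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): encoding
+ * the TARGET dword of MC_CMD_PROXY_CMD from a PF number and an optional VF
+ * number. Pass MC_CMD_PROXY_CMD_IN_VF_NULL as the VF to target the PF
+ * itself. The helper name is hypothetical.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+proxy_cmd_target(uint32_t pf, uint32_t vf)
+{
+    return ((pf & 0xffffU) << MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN) |
+        ((vf & 0xffffU) << MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN);
+}
+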
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
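+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): filling
+ * in an MC_CMD_PROXY_CONFIGURE request using the offsets above. MCDI
+ * payloads are little-endian dwords; the cfg_wr_dword/cfg_wr_qword helpers
+ * are hypothetical stand-ins for the driver's own accessors, the block
+ * sizes are caller-supplied powers of two as required above, and the
+ * 64-byte ALLOWED_MCDI_MASK at offset 44 is assumed to be filled in by the
+ * caller starting from a zeroed request buffer.
+ */
+#include <stddef.h>
+#include <stdint.h>
+
+static inline void
+cfg_wr_dword(uint8_t *buf, size_t ofst, uint32_t v)
+{
+    buf[ofst + 0] = (uint8_t)(v & 0xff);
+    buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
+    buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
+    buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
+}
+
+static inline void
+cfg_wr_qword(uint8_t *buf, size_t ofst, uint64_t v)
+{
+    cfg_wr_dword(buf, ofst, (uint32_t)(v & 0xffffffff));
+    cfg_wr_dword(buf, ofst + 4, (uint32_t)(v >> 32));
+}
+
+static inline void
+proxy_configure_request(uint8_t *buf, uint64_t status_addr,
+    uint32_t status_block_size, uint64_t request_addr,
+    uint32_t request_block_size, uint64_t reply_addr,
+    uint32_t reply_block_size, uint32_t num_blocks)
+{
+    cfg_wr_dword(buf, MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST,
+        1U << MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN);
+    cfg_wr_qword(buf, MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST,
+        status_addr);
+    cfg_wr_dword(buf, MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST,
+        status_block_size);
+    cfg_wr_qword(buf, MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST,
+        request_addr);
+    cfg_wr_dword(buf, MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST,
+        request_block_size);
+    cfg_wr_qword(buf, MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST,
+        reply_addr);
+    /* Zero here means no reply buffer is provided. */
+    cfg_wr_dword(buf, MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST,
+        reply_block_size);
+    /* NUM_BLOCKS applies to all three buffers. */
+    cfg_wr_dword(buf, MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST,
+        num_blocks);
+}
+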
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+#undef MC_CMD_0x5f_PRIVILEGE_CTG
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
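+
+/* Illustrative sketch (not from the generated header; the helper name and the
+ * treatment of the payload as an array of 32-bit words indexed by OFST/4 are
+ * assumptions, with byte ordering left to the MCDI transport; <stdint.h>
+ * assumed): building the 12-byte MC_CMD_PROXY_COMPLETE request from the
+ * offsets and status enums above, e.g. with status
+ * MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED.
+ */
+static inline void
+mc_proxy_complete_fill(uint32_t req[MC_CMD_PROXY_COMPLETE_IN_LEN / 4],
+		       uint32_t block_index, uint32_t status, uint32_t handle)
+{
+	req[MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST / 4] = block_index;
+	req[MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST / 4] = status;
+	req[MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST / 4] = handle;
+}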
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
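+
+/* Illustrative sketch (helper names are not part of the interface;
+ * <stdint.h>/<stddef.h> assumed, payload treated as 32-bit words indexed by
+ * OFST/4): the request length grows as 12 + 8 * num entries, and each entry
+ * is a 64-bit buffer address split across the _LO/_HI dwords.
+ */
+static inline size_t mc_program_buftbl_req_len(unsigned int num_entries)
+{
+	/* num_entries must lie within ENTRY_MINNUM..ENTRY_MAXNUM (1..32) */
+	return MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num_entries);
+}
+
+static inline void mc_program_buftbl_set_entry(uint32_t *req, unsigned int idx,
+					       uint64_t buf_addr)
+{
+	unsigned int ofst = MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST +
+			    idx * MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN;
+
+	req[ofst / 4] = (uint32_t)buf_addr;		/* ENTRY_LO */
+	req[ofst / 4 + 1] = (uint32_t)(buf_addr >> 32);	/* ENTRY_HI */
+}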
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+/* PORT_CONFIG_ENTRY structuredef */
+#define PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define PORT_CONFIG_ENTRY_CORE_OFST 1
+#define PORT_CONFIG_ENTRY_CORE_LEN 1
+#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
+#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
+#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
+#define PORT_CONFIG_ENTRY_CORE_LBN 8
+#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define PORT_CONFIG_ENTRY_LANES_OFST 4
+#define PORT_CONFIG_ENTRY_LANES_LBN 32
+#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
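+
+/* Minimal sketch (helper name is illustrative; <stdint.h>/<stdbool.h>
+ * assumed): checking a returned filter handle against the guaranteed-invalid
+ * value 0xFFFFFFFF_FFFFFFFF documented above.
+ */
+static inline bool mc_filter_handle_valid(uint32_t handle_lo, uint32_t handle_hi)
+{
+	return !(handle_lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
+		 handle_hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);
+}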
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
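+
+/* Illustrative sketch (helper name and the view of the response as host-order
+ * 32-bit words indexed by OFST/4 are assumptions; <stdint.h>/<stddef.h>
+ * assumed): the response carries NUM_SUPPORTED_MATCHES followed by that many
+ * 4-byte match-type words, so its total length is 8 + 4 * num bytes. Match
+ * type i then sits at resp[SUPPORTED_MATCHES_OFST / 4 + i].
+ */
+static inline unsigned int
+mc_parser_disp_num_matches(const uint32_t *resp, size_t resp_len)
+{
+	size_t num;
+
+	if (resp_len < MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN)
+		return 0;
+	num = resp[MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST / 4];
+	/* Never trust the count beyond the actual response length. */
+	if (resp_len < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num))
+		num = (resp_len -
+		       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST) / 4;
+	return (unsigned int)num;
+}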
+
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
+
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
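+
+/* The DMEM read-modify-write above computes new = (old & mask) ^ value, so
+ * e.g. mask = ~bit with value = 0 clears a bit and mask = ~bit with value =
+ * bit sets it. Minimal sketch of that formula (helper name is illustrative,
+ * <stdint.h> assumed):
+ */
+static inline uint32_t mc_dmem_rmw_result(uint32_t old_word, uint32_t and_mask,
+					   uint32_t xor_value)
+{
+	return (old_word & and_mask) ^ xor_value;
+}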
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
+
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC response.
+ * Use the extended version in new code.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
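+
+/* Worked example of the VF_OFFSET/VF_STRIDE fields above (helper name is
+ * illustrative; <stdint.h> assumed): with a zero-based VF index, the routing
+ * ID of VF n is pf_rid + VF_OFFSET + n * VF_STRIDE.
+ */
+static inline uint32_t mc_sriov_vf_rid(uint32_t pf_rid, uint32_t vf_offset,
+					uint32_t vf_stride, uint32_t vf_index)
+{
+	return pf_rid + vf_offset + vf_index * vf_stride;
+}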
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated
+ * to this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
+
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
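+
+/* Minimal sketch of the _LBN/_WIDTH convention used by the bitfields above
+ * (and throughout this header): a field occupies WIDTH bits starting at bit
+ * LBN of the containing 32-bit word, here the DATA dword at offset 0. Helper
+ * name is illustrative; <stdint.h> assumed; valid for widths below 32.
+ */
+static inline uint32_t mc_dword_field(uint32_t dword, unsigned int lbn,
+				      unsigned int width)
+{
+	return (dword >> lbn) & ((1u << width) - 1u);
+}
+/* e.g. mc_dword_field(data, MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN,
+ *                     MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH)
+ */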
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
+
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
+
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
+
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
+
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
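+
+/*
+ * Editorial sketch, not part of the generated MCDI definitions: packing one
+ * MC_CMD_SATELLITE_DOWNLOAD request using only the macros above.  mcdi_rpc()
+ * is a hypothetical transport hook (command number, request buffer, request
+ * length in bytes); assumes <stdint.h> and <string.h>.
+ */
+#if 0	/* illustration only */
+extern int mcdi_rpc(unsigned int cmd, const uint32_t *req, size_t req_len);
+
+static int
+satellite_download_chunk(uint32_t phase, uint32_t target, uint32_t chunk_id,
+    const uint32_t *data, size_t ndwords)
+{
+	/* ndwords must not exceed MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM */
+	uint32_t req[MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX / 4] = { 0 };
+
+	req[MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST / 4] = phase;
+	req[MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST / 4] = target;
+	req[MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST / 4] = chunk_id;
+	req[MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST / 4] =
+	    (uint32_t)(ndwords * 4);
+	if (data != NULL)
+		memcpy(&req[MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST / 4],
+		    data, ndwords * 4);
+
+	return (mcdi_rpc(MC_CMD_SATELLITE_DOWNLOAD, req,
+	    MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(ndwords)));
+}
+
+/*
+ * A full download then follows the sequence documented above: PHASE_RESET
+ * with TARGET_ALL and chunk ID/length 0, PHASE_IMEMS chunks per IMEM target
+ * ending with a CHUNK_ID_LAST chunk that carries the 32-bit sum of the data,
+ * PHASE_VECTORS likewise, and finally PHASE_READY with TARGET_ALL.
+ */
+#endif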
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
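+
+/*
+ * Editorial sketch, not part of the generated MCDI definitions: how the
+ * _LBN/_WIDTH pairs above are consumed when decoding the response.
+ * mcdi_field() is a local helper, not an MCDI API; assumes <stdint.h>.
+ */
+#if 0	/* illustration only */
+static inline uint32_t
+mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
+{
+	return ((dword >> lbn) & ((1u << width) - 1u));
+}
+
+/* Test a capability bit in the FLAGS1 word of the response. */
+#define CAP_FLAG1(flags1, field)					\
+	mcdi_field((flags1),						\
+	    MC_CMD_GET_CAPABILITIES_OUT_ ## field ## _LBN,		\
+	    MC_CMD_GET_CAPABILITIES_OUT_ ## field ## _WIDTH)
+
+/* e.g. CAP_FLAG1(flags1, VXLAN_NVGRE), CAP_FLAG1(flags1, TX_TSO) */
+
+/* Split RXPD_FW_VERSION into its 12-bit revision and 4-bit type fields. */
+#define RXPD_FW_REV(ver)						\
+	mcdi_field((ver),						\
+	    MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN,	\
+	    MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH)
+#define RXPD_FW_TYPE(ver)						\
+	mcdi_field((ver),						\
+	    MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN,	\
+	    MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH)
+#endif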
+
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but that the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A future
+ * driver should look for a new field supporting the new scheme; a current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
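+
+/*
+ * Editorial sketch, not part of the generated MCDI definitions: interpreting
+ * one byte of the PFS_TO_PORTS_ASSIGNMENT array above.  port_out receives a
+ * valid external port number only when the function returns true; the
+ * INCOMPATIBLE_ASSIGNMENT sentinel is treated as "not assigned", as the
+ * comment above requires.  Assumes <stdint.h> and <stdbool.h>.
+ */
+#if 0	/* illustration only */
+static bool
+pf_external_port(uint8_t assignment, unsigned int *port_out)
+{
+	switch (assignment) {
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED:
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT:
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED:
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT:
+		return (false);		/* no usable port mapping for this PF */
+	default:
+		*port_out = assignment;	/* external port number */
+		return (true);
+	}
+}
+#endif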
+
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but that the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A future
+ * driver should look for a new field supporting the new scheme; a current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
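+
+/*
+ * Editorial sketch, not part of the generated MCDI definitions: deriving the
+ * per-VI window stride from VI_WINDOW_MODE as described above.  An unknown
+ * mode yields 0 so the caller can fail gracefully.  Assumes <stddef.h> and
+ * <stdint.h>.
+ */
+#if 0	/* illustration only */
+static inline size_t
+vi_window_stride(uint8_t vi_window_mode)
+{
+	switch (vi_window_mode) {
+	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+		return (8 * 1024);	/* PIO at 4k; CTPIO not mapped */
+	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+		return (16 * 1024);	/* PIO at 4k; CTPIO at 12k */
+	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+		return (64 * 1024);	/* PIO at 4k; CTPIO at 12k */
+	default:
+		return (0);		/* unknown window mode */
+	}
+}
+#endif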
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
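+
+/*
+ * Editorial sketch, not part of the generated MCDI definitions: composing the
+ * MC_CMD_V2_EXTN header word from the extended command number (15 bits) and
+ * the actual encapsulated length (10 bits), per the fields above.  Assumes
+ * <stdint.h>; range checking of the arguments is left to the caller.
+ */
+#if 0	/* illustration only */
+static inline uint32_t
+mcdi_v2_extn_header(unsigned int extended_cmd, unsigned int actual_len)
+{
+	return ((((uint32_t)extended_cmd & 0x7fffu) <<
+	    MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
+	    (((uint32_t)actual_len & 0x3ffu) <<
+	    MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN));
+}
+#endif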
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#undef MC_CMD_0xb2_PRIVILEGE_CTG
+
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#undef MC_CMD_0xb3_PRIVILEGE_CTG
+
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#undef MC_CMD_0xb4_PRIVILEGE_CTG
+
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+#undef MC_CMD_0xb5_PRIVILEGE_CTG
+
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if the existing configuration means we can't support
+ * attached v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
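+
+/* Illustrative sketch, not part of the generated MCDI definitions: one way a
+ * host driver might assemble an MC_CMD_VPORT_ALLOC request for a NORMAL
+ * v-port carrying up to two VLAN tags, using the offsets and bit positions
+ * defined above. The little-endian dword encoding and the helper below are
+ * assumptions made purely for illustration.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void vport_alloc_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
+{
+	buf[ofst + 0] = (uint8_t)(val & 0xff);
+	buf[ofst + 1] = (uint8_t)((val >> 8) & 0xff);
+	buf[ofst + 2] = (uint8_t)((val >> 16) & 0xff);
+	buf[ofst + 3] = (uint8_t)((val >> 24) & 0xff);
+}
+
+static void vport_alloc_build(uint8_t buf[MC_CMD_VPORT_ALLOC_IN_LEN],
+			      uint32_t upstream_port_id, unsigned int num_tags,
+			      uint16_t tag0, uint16_t tag1)
+{
+	uint32_t tags = 0;
+
+	memset(buf, 0, MC_CMD_VPORT_ALLOC_IN_LEN);
+	vport_alloc_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
+	    upstream_port_id);
+	vport_alloc_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_TYPE_OFST,
+	    MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL);
+	/* FLAGS dword left at zero: no AUTO_PORT, no VLAN_RESTRICT. */
+	vport_alloc_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST,
+	    num_tags);
+	if (num_tags >= 1)
+		tags |= (uint32_t)tag0 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN;
+	if (num_tags >= 2)
+		tags |= (uint32_t)tag1 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN;
+	vport_alloc_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST, tags);
+}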
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
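+
+/* Illustrative sketch, not part of the generated MCDI definitions: packing
+ * the PF/VF pair into the FUNCTION dword of MC_CMD_EVB_PORT_ASSIGN_IN using
+ * the _OFST/_LBN/_WIDTH values above. A little-endian payload encoding is
+ * assumed for this example.
+ */
+#include <stdint.h>
+
+static void evb_port_assign_build(uint8_t buf[MC_CMD_EVB_PORT_ASSIGN_IN_LEN],
+				  uint32_t port_id, uint16_t pf, uint16_t vf)
+{
+	uint32_t function = ((uint32_t)pf << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
+			    ((uint32_t)vf << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);
+	unsigned int i;
+
+	for (i = 0; i < 4; i++) {
+		buf[MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST + i] =
+		    (uint8_t)(port_id >> (8 * i));
+		buf[MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST + i] =
+		    (uint8_t)(function >> (8 * i));
+	}
+}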
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
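+
+/* Illustrative sketch, not part of the generated MCDI definitions: building
+ * an MC_CMD_RDWR_A64_REGIONS request that rewrites only region 2. Bit n of
+ * the write-mask byte at offset 16 enables a write of REGIONn, per the
+ * comment above; regions whose bit is clear are simply read back. The
+ * little-endian dword encoding is an assumption made for the example.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void a64_regions_write_region2(uint8_t buf[MC_CMD_RDWR_A64_REGIONS_IN_LEN],
+				      uint32_t region2)
+{
+	unsigned int i;
+
+	memset(buf, 0, MC_CMD_RDWR_A64_REGIONS_IN_LEN);
+	for (i = 0; i < 4; i++)
+		buf[MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST + i] =
+		    (uint8_t)(region2 >> (8 * i));
+	/* Write-enable bit 2 only; regions 0, 1 and 3 are read, not written. */
+	buf[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST] = 1u << 2;
+}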
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
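+
+/* Illustrative sketch, not part of the generated MCDI definitions: pulling
+ * the context handle out of an MC_CMD_RSS_CONTEXT_ALLOC response and
+ * rejecting the guaranteed-invalid value defined above. The little-endian
+ * dword encoding is assumed for the example.
+ */
+#include <stdint.h>
+
+static int rss_ctx_alloc_parse(const uint8_t *resp, uint32_t *ctx_out)
+{
+	uint32_t ctx = 0;
+	unsigned int i;
+
+	for (i = 0; i < 4; i++)
+		ctx |= (uint32_t)resp[MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST + i]
+		    << (8 * i);
+	if (ctx == MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID)
+		return -1;	/* firmware did not hand back a usable context */
+	*ctx_out = ctx;
+	return 0;
+}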
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
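+
+/* Illustrative sketch, not part of the generated MCDI definitions: laying out
+ * an MC_CMD_RSS_CONTEXT_SET_KEY request. The 40-byte Toeplitz key is copied
+ * verbatim at TOEPLITZ_KEY_OFST; only the context handle dword uses the
+ * (assumed) little-endian encoding.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void rss_set_key_build(uint8_t buf[MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN],
+			      uint32_t ctx, const uint8_t *key)
+{
+	unsigned int i;
+
+	for (i = 0; i < 4; i++)
+		buf[MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST + i] =
+		    (uint8_t)(ctx >> (8 * i));
+	memcpy(buf + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST, key,
+	    MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+}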
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
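+
+/* Illustrative sketch, not part of the generated MCDI definitions: filling
+ * the 128-entry indirection table of MC_CMD_RSS_CONTEXT_SET_TABLE with a
+ * simple round-robin spread over num_queues RX queues; each entry is one byte
+ * in the range 0 to NUM_QUEUES-1, per the allocation comments above. The
+ * little-endian encoding of the handle dword is assumed.
+ */
+#include <stdint.h>
+
+static void rss_set_table_build(uint8_t buf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN],
+				uint32_t ctx, unsigned int num_queues)
+{
+	unsigned int i;
+
+	/* num_queues must be at least 1 (and within the context's span). */
+	for (i = 0; i < 4; i++)
+		buf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST + i] =
+		    (uint8_t)(ctx >> (8 * i));
+	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
+		buf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST + i] =
+		    (uint8_t)(i % num_queues);
+}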
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
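+
+/* Illustrative sketch, not part of the generated MCDI definitions: composing
+ * the FLAGS dword for MC_CMD_RSS_CONTEXT_SET_FLAGS along the lines described
+ * above. The legacy _EN bits are always filled in; the caller-supplied 4-bit
+ * TCP/IPv4 mode nibble is merged in only when the caller has already checked
+ * the ADDITIONAL_RSS_MODES capability, since older firmware rejects FLAGS
+ * values above 0xff with EINVAL.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+
+static uint32_t rss_flags_compose(bool have_additional_modes,
+				  uint32_t tcp_ipv4_mode)
+{
+	uint32_t flags =
+	    (1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN) |
+	    (1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN) |
+	    (1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN) |
+	    (1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN);
+
+	if (have_additional_modes)
+		flags |= (tcp_ipv4_mode &
+		    ((1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH) - 1))
+		    << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
+	return flags;
+}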
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
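+
+/* Illustrative sketch, not part of the generated MCDI definitions: applying
+ * the rule described above to the FLAGS dword returned by
+ * MC_CMD_RSS_CONTEXT_GET_FLAGS. If any _MODE nibble (bits 8 and above) is
+ * non-zero the modes are authoritative; otherwise the legacy _EN bits are
+ * used. Treating a non-zero TCP/IPv4 nibble as "some hashing selected" is an
+ * assumption here; the per-bit meanings live in the RSS_MODE structure, which
+ * is not shown in this excerpt.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+
+static bool rss_flags_tcpv4_hashing(uint32_t flags)
+{
+	uint32_t modes = flags >> MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN;
+
+	if (modes != 0)
+		return (modes &
+		    ((1u << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH) - 1)) != 0;
+	return ((flags >> MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN) & 1u) != 0;
+}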
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#undef MC_CMD_0xa4_PRIVILEGE_CTG
+
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#undef MC_CMD_0xa5_PRIVILEGE_CTG
+
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#undef MC_CMD_0xa6_PRIVILEGE_CTG
+
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#undef MC_CMD_0xa7_PRIVILEGE_CTG
+
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+#undef MC_CMD_0xbf_PRIVILEGE_CTG
+
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
+
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
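+
+/* Illustrative sketch, not part of the generated MCDI definitions: walking
+ * the variable-length MAC address array in an MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * response, whose total length follows the LEN(num) formula above. The count
+ * dword is assumed little-endian and the handle_mac() callback is
+ * hypothetical.
+ */
+#include <stddef.h>
+#include <stdint.h>
+
+static void vport_macs_parse(const uint8_t *resp, size_t resp_len,
+			     void (*handle_mac)(const uint8_t *mac))
+{
+	uint32_t count = 0;
+	uint32_t i;
+
+	for (i = 0; i < 4; i++)
+		count |= (uint32_t)resp[MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST + i]
+		    << (8 * i);
+	if (resp_len < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(count))
+		return;		/* short or malformed response */
+	for (i = 0; i < count; i++)
+		handle_mac(resp + MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST +
+		    i * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN);
+}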
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
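+
+/* Illustrative sketch, not part of the generated MCDI definitions: checking
+ * the RESET_DONE bit in an MC_CMD_VPORT_RECONFIGURE response, which reports
+ * whether the v-port's user function was reset while the changes were applied
+ * (see the command description above). Little-endian encoding of the FLAGS
+ * dword is assumed.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+
+static bool vport_reconfigure_reset_done(const uint8_t *resp)
+{
+	uint32_t flags = 0;
+	unsigned int i;
+
+	for (i = 0; i < 4; i++)
+		flags |= (uint32_t)resp[MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST + i]
+		    << (8 * i);
+	return ((flags >> MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN) & 1u) != 0;
+}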
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries, and does not use chunk handles. All entries must be in
+ * range and used for q page mapping, although the latter restriction may be
+ * lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
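+
+/* Illustrative sketch, not part of the generated MCDI definitions: building
+ * the single DATA dword of MC_CMD_SET_RXDP_CONFIG to enable host DMA padding
+ * with a 128-byte pad length (Medford only, per the enum comments above).
+ * The little-endian dword encoding is assumed for the example.
+ */
+#include <stdint.h>
+
+static void set_rxdp_config_build(uint8_t buf[MC_CMD_SET_RXDP_CONFIG_IN_LEN])
+{
+	uint32_t data =
+	    (1u << MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN) |
+	    ((uint32_t)MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 <<
+	     MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN);
+	unsigned int i;
+
+	for (i = 0; i < 4; i++)
+		buf[MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST + i] =
+		    (uint8_t)(data >> (8 * i));
+}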
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+#undef MC_CMD_0xad_PRIVILEGE_CTG
+
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+#undef MC_CMD_0xae_PRIVILEGE_CTG
+
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+/* The first 8 bits [39:32] of DATA are consumed by the MC-DPCPU protocol and
+ * must be initialised to zero.
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+#undef MC_CMD_0xe7_PRIVILEGE_CTG
+
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+#undef MC_CMD_0xe8_PRIVILEGE_CTG
+
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#undef MC_CMD_0xe9_PRIVILEGE_CTG
+
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information. PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, the chip enters
+ * quiescent mode, and the calling function gets exclusive MCDI ownership. The
+ * only way out is a reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_SEND_DATA
+ * Send a checksummed[sic] block of data over the uart. The response is a
+ * placeholder in case we wish to make this reliable; currently requests are
+ * fire-and-forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+#undef MC_CMD_0xee_PRIVILEGE_CTG
+
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request a checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
+
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device's One-Time-Programmable (OTP) fuses.
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
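+
+/*
+ * Illustrative sketch only: read a region of OTP by issuing MC_CMD_READ_FUSES
+ * in chunks of at most MC_CMD_READ_FUSES_OUT_DATA_MAXNUM bytes. mcdi_rpc() is
+ * a hypothetical transport helper; the wr32()/rd32() accessors assume a
+ * little-endian host for brevity.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+static uint32_t rd32(const uint8_t *b, size_t o) { uint32_t v; memcpy(&v, b + o, sizeof(v)); return v; }
+
+static int read_fuses(uint32_t offset, uint8_t *dst, size_t len)
+{
+	uint8_t req[MC_CMD_READ_FUSES_IN_LEN];
+	uint8_t rsp[MC_CMD_READ_FUSES_OUT_LENMAX];
+	size_t done = 0;
+
+	while (done < len) {
+		size_t chunk = len - done;
+		size_t rsplen = 0;
+		uint32_t got;
+		int rc;
+
+		if (chunk > MC_CMD_READ_FUSES_OUT_DATA_MAXNUM)
+			chunk = MC_CMD_READ_FUSES_OUT_DATA_MAXNUM;
+
+		wr32(req, MC_CMD_READ_FUSES_IN_OFFSET_OFST,
+		     offset + (uint32_t)done);
+		wr32(req, MC_CMD_READ_FUSES_IN_LENGTH_OFST, (uint32_t)chunk);
+
+		rc = mcdi_rpc(MC_CMD_READ_FUSES, req, sizeof(req),
+			      rsp, sizeof(rsp), &rsplen);
+		if (rc != 0)
+			return rc;
+
+		got = rd32(rsp, MC_CMD_READ_FUSES_OUT_LENGTH_OFST);
+		if (got == 0 || got > chunk ||
+		    rsplen < MC_CMD_READ_FUSES_OUT_LEN(got))
+			return -1;	/* short or malformed response */
+
+		memcpy(dst + done, rsp + MC_CMD_READ_FUSES_OUT_DATA_OFST, got);
+		done += got;
+	}
+	return 0;
+}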
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
+
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (0-128 for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
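+
+/*
+ * Illustrative sketch only: decode the PARAM dwords returned by MC_CMD_KR_TUNE
+ * RXEQ_GET using the LBN/WIDTH definitions above. Each dword packs the
+ * parameter ID, lane, autocal flag and the initial/current values. rd32()
+ * assumes a little-endian host for brevity.
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+static uint32_t rd32(const uint8_t *b, size_t o) { uint32_t v; memcpy(&v, b + o, sizeof(v)); return v; }
+
+/* Extract a bitfield given its least-significant bit number and width. */
+static unsigned int field(uint32_t dword, unsigned int lbn, unsigned int width)
+{
+	return (unsigned int)((dword >> lbn) & ((1u << width) - 1u));
+}
+
+static void dump_rxeq_params(const uint8_t *rsp, size_t rsplen)
+{
+	size_t nparams = rsplen / MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN;
+	size_t i;
+
+	for (i = 0; i < nparams; i++) {
+		uint32_t p = rd32(rsp,
+		    MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST +
+		    i * MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN);
+
+		printf("param id=%u lane=%u autocal=%u initial=%u current=%u\n",
+		       field(p, MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN,
+			     MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH),
+		       field(p, MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN,
+			     MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH),
+		       field(p, MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN,
+			     MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH),
+		       field(p, MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN,
+			     MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH),
+		       field(p, MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN,
+			     MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH));
+	}
+}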
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude (Huntington, Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
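+
+/*
+ * Illustrative sketch only: the START/POLL eye-plot flow described above.
+ * Start the plot on one lane, then poll repeatedly, consuming one row of BER
+ * samples per response, until the MC returns no more data. mcdi_rpc() and
+ * consume_row() are hypothetical helpers standing in for the caller's
+ * transport and data handling; wr32() assumes a little-endian host.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+extern void consume_row(const uint8_t *samples, size_t nsamples);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+
+static int kr_tune_eye_plot(uint32_t lane)
+{
+	uint8_t start[MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN];
+	uint8_t poll[MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN];
+	uint8_t rsp[MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX];
+	size_t rsplen;
+	int rc;
+
+	memset(start, 0, sizeof(start));
+	start[MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST] =
+	    MC_CMD_KR_TUNE_IN_START_EYE_PLOT;
+	wr32(start, MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST, lane);
+	rc = mcdi_rpc(MC_CMD_KR_TUNE, start, sizeof(start), NULL, 0, NULL);
+	if (rc != 0)
+		return rc;
+
+	memset(poll, 0, sizeof(poll));
+	poll[MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST] =
+	    MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT;
+
+	do {
+		rsplen = 0;
+		rc = mcdi_rpc(MC_CMD_KR_TUNE, poll, sizeof(poll),
+			      rsp, sizeof(rsp), &rsplen);
+		if (rc != 0)
+			return rc;
+		consume_row(rsp,
+		    rsplen / MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN);
+	} while (rsplen != 0);	/* empty response => plot complete */
+
+	return 0;
+}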
+
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
+
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
+#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
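+
+/*
+ * Illustrative sketch only: issue MC_CMD_LICENSING OP_GET_KEY_STATS and pull
+ * out the key counts and self-test result. mcdi_rpc() is a hypothetical
+ * transport helper; wr32()/rd32() assume a little-endian host.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+static uint32_t rd32(const uint8_t *b, size_t o) { uint32_t v; memcpy(&v, b + o, sizeof(v)); return v; }
+
+struct license_stats {
+	uint32_t valid, invalid, blacklisted, unverifiable, wrong_node;
+	int self_test_passed;
+};
+
+static int licensing_get_key_stats(struct license_stats *st)
+{
+	uint8_t req[MC_CMD_LICENSING_IN_LEN];
+	uint8_t rsp[MC_CMD_LICENSING_OUT_LEN];
+	size_t rsplen = 0;
+	int rc;
+
+	wr32(req, MC_CMD_LICENSING_IN_OP_OFST,
+	     MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+	rc = mcdi_rpc(MC_CMD_LICENSING, req, sizeof(req),
+		      rsp, sizeof(rsp), &rsplen);
+	if (rc != 0)
+		return rc;
+	if (rsplen < MC_CMD_LICENSING_OUT_LEN)
+		return -1;
+
+	st->valid = rd32(rsp, MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST);
+	st->invalid = rd32(rsp, MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST);
+	st->blacklisted = rd32(rsp, MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST);
+	st->unverifiable = rd32(rsp, MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST);
+	st->wrong_node = rd32(rsp, MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST);
+	st->self_test_passed =
+	    (rd32(rsp, MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST) ==
+	     MC_CMD_LICENSING_OUT_SELF_TEST_PASS);
+	return 0;
+}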
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses. Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
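+
+/*
+ * Illustrative sketch only: MC_CMD_LICENSING_V3 OP_REPORT_LICENSE, retrying
+ * while the MC reports EAGAIN (license processing still in progress) and
+ * assembling the 64-bit LICENSED_APPS/LICENSED_FEATURES masks from their
+ * LO/HI dwords. mcdi_rpc() is a hypothetical transport helper assumed to
+ * return -EAGAIN in that case; wr32()/rd32() assume a little-endian host.
+ * Real code would rate-limit or bound the retry loop.
+ */
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+static uint32_t rd32(const uint8_t *b, size_t o) { uint32_t v; memcpy(&v, b + o, sizeof(v)); return v; }
+
+static int licensing_v3_report(uint64_t *apps, uint64_t *features)
+{
+	uint8_t req[MC_CMD_LICENSING_V3_IN_LEN];
+	uint8_t rsp[MC_CMD_LICENSING_V3_OUT_LEN];
+	size_t rsplen = 0;
+	int rc;
+
+	wr32(req, MC_CMD_LICENSING_V3_IN_OP_OFST,
+	     MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+	do {
+		rc = mcdi_rpc(MC_CMD_LICENSING_V3, req, sizeof(req),
+			      rsp, sizeof(rsp), &rsplen);
+	} while (rc == -EAGAIN);	/* update still running; try again */
+	if (rc != 0)
+		return rc;
+	if (rsplen < MC_CMD_LICENSING_V3_OUT_LEN)
+		return -1;
+
+	*apps =
+	    (uint64_t)rd32(rsp, MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST) |
+	    ((uint64_t)rd32(rsp, MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST) << 32);
+	*features =
+	    (uint64_t)rd32(rsp, MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST) |
+	    ((uint64_t)rd32(rsp, MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST) << 32);
+	return 0;
+}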
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (e.g. 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
+
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless of which function is calling; effectively
+ * this is the PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of the v-adaptor associated with the client. If no such
+ * v-adaptor exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
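+
+/*
+ * Illustrative sketch only: build the V3 challenge/response request described
+ * above. The caller supplies a 48-byte (384-bit) challenge and the licensed
+ * application expressed as a single-bit 64-bit mask; the MC answers with a
+ * 96-byte ECDSA signature plus expiry and MAC address fields. Verifying the
+ * signature (over the concatenation of the challenge and the remaining output
+ * fields, per the comment above) is out of scope here. mcdi_rpc() is a
+ * hypothetical transport helper; wr32() assumes a little-endian host.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+
+static int licensed_v3_validate_app(const uint8_t challenge[48], uint64_t app_mask,
+    uint8_t response[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN])
+{
+	uint8_t req[MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN];
+	uint8_t rsp[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN];
+	size_t rsplen = 0;
+	int rc;
+
+	memcpy(req + MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST,
+	       challenge, MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN);
+	wr32(req, MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST,
+	     (uint32_t)(app_mask & 0xffffffffu));
+	wr32(req, MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST,
+	     (uint32_t)(app_mask >> 32));
+
+	rc = mcdi_rpc(MC_CMD_LICENSED_V3_VALIDATE_APP, req, sizeof(req),
+		      rsp, sizeof(rsp), &rsplen);
+	if (rc != 0)
+		return rc;
+	if (rsplen < MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN)
+		return -1;
+
+	memcpy(response, rsp + MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST,
+	       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN);
+	return 0;
+}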
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive a MC reboot, but will be
+ * erased when the adapter is power cycled
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
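+
+/*
+ * Illustrative sketch only: install a temporary license with the asynchronous
+ * SET operation and then poll STATUS until validation completes, as described
+ * above. mcdi_rpc() is a hypothetical transport helper; wr32()/rd32() assume
+ * a little-endian host. Real code would rate-limit or bound the polling loop.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+static uint32_t rd32(const uint8_t *b, size_t o) { uint32_t v; memcpy(&v, b + o, sizeof(v)); return v; }
+
+static int licensing_v3_set_temporary(
+    const uint8_t license[MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN])
+{
+	uint8_t set[MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN];
+	uint8_t status[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN];
+	uint8_t rsp[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN];
+	size_t rsplen = 0;
+	uint32_t state;
+	int rc;
+
+	wr32(set, MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST,
+	     MC_CMD_LICENSING_V3_TEMPORARY_SET);
+	memcpy(set + MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST,
+	       license, MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN);
+	rc = mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, set, sizeof(set),
+		      NULL, 0, NULL);
+	if (rc != 0)
+		return rc;
+
+	wr32(status, MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST,
+	     MC_CMD_LICENSING_V3_TEMPORARY_STATUS);
+	do {
+		rc = mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, status,
+			      sizeof(status), rsp, sizeof(rsp), &rsplen);
+		if (rc != 0)
+			return rc;
+		state = rd32(rsp,
+		    MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST);
+	} while (state == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS);
+
+	return (state == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK) ? 0 : -1;
+}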
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure RX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#undef MC_CMD_0xf7_PRIVILEGE_CTG
+
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
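+
+/*
+ * Illustrative sketch only: enable RX port sniffing to a single queue using
+ * the layout above (ENABLE set, PROMISCUOUS optional, SIMPLE receive mode,
+ * RSS context left as the never-valid 0xFFFFFFFF marker). Requires an
+ * admin-privileged function. mcdi_rpc() is a hypothetical transport helper;
+ * wr32() assumes a little-endian host.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+
+static int set_port_sniff_to_queue(uint32_t rxq, bool promiscuous)
+{
+	uint8_t req[MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN];
+	uint32_t flags;
+
+	flags = 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN;
+	if (promiscuous)
+		flags |= 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN;
+
+	wr32(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);
+	wr32(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, rxq);
+	wr32(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
+	     MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
+	/* 0xFFFFFFFF is documented above as never being a valid RSS handle. */
+	wr32(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST, 0xffffffffu);
+
+	return mcdi_rpc(MC_CMD_SET_PORT_SNIFF_CONFIG, req, sizeof(req),
+			NULL, 0, NULL);
+}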
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current RX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#undef MC_CMD_0xf8_PRIVILEGE_CTG
+
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
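+
+/*
+ * Illustrative sketch only: set a single parser-dispatcher configuration
+ * value for one entity, e.g. the per-TXQ multicast UDP destination lookup
+ * enable described above (ENTITY = queue handle, VALUE = boolean). mcdi_rpc()
+ * is a hypothetical transport helper; wr32() assumes a little-endian host.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
+		    uint8_t *outbuf, size_t outlen_max, size_t *outlen_used);
+
+static void wr32(uint8_t *b, size_t o, uint32_t v) { memcpy(b + o, &v, sizeof(v)); }
+
+static int set_parser_disp_config(uint32_t type, uint32_t entity, uint32_t value)
+{
+	/* One VALUE dword: total length is 8 + 4 * 1 = 12 bytes (the LENMIN). */
+	uint8_t req[MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1)];
+
+	wr32(req, MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST, type);
+	wr32(req, MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST, entity);
+	wr32(req, MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST, value);
+
+	return mcdi_rpc(MC_CMD_SET_PARSER_DISP_CONFIG, req, sizeof(req),
+			NULL, 0, NULL);
+}
+
+/*
+ * Example usage (txq_handle is a caller-supplied queue handle):
+ *   set_parser_disp_config(
+ *       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN,
+ *       txq_handle, 1);
+ */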
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+#undef MC_CMD_0xfb_PRIVILEGE_CTG
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+#undef MC_CMD_0xfc_PRIVILEGE_CTG
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+#undef MC_CMD_0xfe_PRIVILEGE_CTG
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+#undef MC_CMD_0x100_PRIVILEGE_CTG
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
+
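+/* Illustrative sketch, not part of the generated MCDI definitions: one way a
+ * caller might decode the 8-byte MC_CMD_GET_WORKAROUNDS response using the
+ * offsets and bit values defined above. The little-endian decoding helper and
+ * the assumption that <stdint.h>/<stddef.h> style types are in scope are
+ * assumptions of this example.
+ */
+static uint32_t
+example_workarounds_dword(const uint8_t *payload, size_t ofst)
+{
+	/* MCDI response payloads are little-endian byte streams. */
+	return ((uint32_t)payload[ofst]) |
+	    ((uint32_t)payload[ofst + 1] << 8) |
+	    ((uint32_t)payload[ofst + 2] << 16) |
+	    ((uint32_t)payload[ofst + 3] << 24);
+}
+
+static int
+example_bug35388_enabled(const uint8_t *outbuf, size_t outlen)
+{
+	uint32_t enabled;
+
+	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
+		return (0);
+
+	enabled = example_workarounds_dword(outbuf,
+	    MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST);
+	return ((enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) != 0);
+}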
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the TX packets' source MAC address to be set to any arbitrary
+ * MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+
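+/* Illustrative sketch, not part of the generated MCDI definitions: building
+ * the 8-byte MC_CMD_PRIVILEGE_MASK request that sets PF <pf>'s privilege mask
+ * to GRP_LINK only. Field packing follows the FUNCTION PF/VF LBN/WIDTH and
+ * DO_CHANGE definitions above; the little-endian dword writer and the choice
+ * of mask are assumptions of this example.
+ */
+static void
+example_privmask_set_dword(uint8_t *payload, size_t ofst, uint32_t value)
+{
+	payload[ofst] = (uint8_t)value;
+	payload[ofst + 1] = (uint8_t)(value >> 8);
+	payload[ofst + 2] = (uint8_t)(value >> 16);
+	payload[ofst + 3] = (uint8_t)(value >> 24);
+}
+
+static void
+example_build_privilege_mask_req(uint8_t *inbuf, uint32_t pf)
+{
+	uint32_t function;
+
+	/* PF in bits 0..15, VF in bits 16..31; VF_NULL targets the PF itself. */
+	function = (pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
+	    ((uint32_t)MC_CMD_PRIVILEGE_MASK_IN_VF_NULL <<
+	    MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
+	example_privmask_set_dword(inbuf,
+	    MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST, function);
+
+	/* The new mask is only applied when the DO_CHANGE (MSB) flag is set. */
+	example_privmask_set_dword(inbuf,
+	    MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST,
+	    MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK |
+	    MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE);
+}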
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; must be a VF,
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+#undef MC_CMD_0x103_PRIVILEGE_CTG
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
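+/* Illustrative sketch, not part of the generated MCDI definitions: sizing an
+ * MC_CMD_XPM_READ_BYTES exchange. A single response carries at most
+ * DATA_MAXNUM data bytes, so larger reads must be split into chunks by the
+ * caller; the chunking policy shown here is an assumption of this example.
+ */
+static uint32_t
+example_xpm_read_chunk(uint32_t remaining)
+{
+	/* Number of data bytes to request in one MCDI message. */
+	return ((remaining < MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM) ?
+	    remaining : MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM);
+}
+
+static size_t
+example_xpm_read_expected_outlen(uint32_t remaining)
+{
+	/* Expected response length for one chunk of the read. */
+	return (MC_CMD_XPM_READ_BYTES_OUT_LEN(example_xpm_read_chunk(remaining)));
+}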
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+#undef MC_CMD_0x104_PRIVILEGE_CTG
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space is available). If 0, only one write attempt
+ * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+#undef MC_CMD_0x107_PRIVILEGE_CTG
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+#undef MC_CMD_0x108_PRIVILEGE_CTG
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be fewer than BAD_COUNT, if they do not all
+ * fit into the MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
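+/* Illustrative sketch, not part of the generated MCDI definitions: walking an
+ * MC_CMD_XPM_BLANK_CHECK response. BAD_COUNT is the total number of non-blank
+ * locations found, while the BAD_ADDR array may hold fewer entries than that
+ * if they do not all fit in one MCDI response. The little-endian decoding and
+ * the reporting callback are assumptions of this example.
+ */
+static uint32_t
+example_blank_check_report(const uint8_t *outbuf, size_t outlen,
+    void (*report)(uint16_t addr))
+{
+	uint32_t bad_count;
+	size_t nentries;
+	size_t i;
+
+	if (outlen < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
+		return (0);
+
+	bad_count =
+	    (uint32_t)outbuf[MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST] |
+	    ((uint32_t)outbuf[MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST + 1] << 8) |
+	    ((uint32_t)outbuf[MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST + 2] << 16) |
+	    ((uint32_t)outbuf[MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST + 3] << 24);
+
+	/* Number of 16-bit addresses actually present in this response. */
+	nentries = (outlen - MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST) /
+	    MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN;
+
+	for (i = 0; i < nentries; i++) {
+		size_t ofst = MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST +
+		    i * MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN;
+
+		report((uint16_t)(outbuf[ofst] |
+		    ((uint16_t)outbuf[ofst + 1] << 8)));
+	}
+
+	return (bad_count);
+}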
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+#undef MC_CMD_0x109_PRIVILEGE_CTG
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does not match, otherwise it will respond with success before it jumps to
+ * IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+#undef MC_CMD_0x10d_PRIVILEGE_CTG
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_RULE
+ * Set the blacklist and/or whitelist action for a particular set of match
+ * criteria.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SET_SECURITY_RULE 0x10f
+#undef MC_CMD_0x10f_PRIVILEGE_CTG
+
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
+#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
+/* fields to include in match criteria */
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1
+/* remote MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6
+/* remote port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2
+/* local MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6
+/* local port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
+/* Physical port to match (as little-endian 32-bit value) */
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+/* Reserved; set to 0 */
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+/* remote IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16
+/* local IP address to match (as bytes in network order; set last 12 bytes to 0
+ * for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16
+/* remote subnet ID to match (as little-endian 32-bit value); note that remote
+ * subnets are matched by mapping the remote IP address to a "subnet ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_SUBNET_MAP_SET_NODE appropriately
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+/* remote portrange ID to match (as little-endian 32-bit value); note that
+ * remote port ranges are matched by mapping the remote port to a "portrange
+ * ID" via a data structure which must already have been configured using
+ * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+/* local portrange ID to match (as little-endian 32-bit value); note that local
+ * port ranges are matched by mapping the local port to a "portrange ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+/* set the action for transmitted packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current TX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+/* set the action for received packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current RX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+/* counter ID to associate with this rule; IDs are allocated using
+ * MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+/* enum: special value for the null counter ID */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+
+/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+/* new reference count for uses of counter ID */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+/* constructed match bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
+/* constructed discriminator bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+/* base location for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+/* step for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+
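+/* Illustrative sketch, not part of the generated MCDI definitions: composing
+ * the MATCH_FIELDS bitmask and two of the match fields for
+ * MC_CMD_SET_SECURITY_RULE_IN. The particular rule shown (match on local port
+ * and IP protocol) and the raw buffer handling are assumptions of this
+ * example; the remaining fields and actions would be filled in the same way.
+ */
+static void
+example_security_rule_match(uint8_t *inbuf, uint16_t local_port,
+    uint8_t ip_proto)
+{
+	uint32_t match;
+
+	/* Select which fields of the rule are significant. */
+	match = (1u << MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN) |
+	    (1u << MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN);
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST] = (uint8_t)match;
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST + 1] =
+	    (uint8_t)(match >> 8);
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST + 2] =
+	    (uint8_t)(match >> 16);
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST + 3] =
+	    (uint8_t)(match >> 24);
+
+	/* LOCAL_PORT is carried as bytes in network (big-endian) order. */
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST] =
+	    (uint8_t)(local_port >> 8);
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST + 1] =
+	    (uint8_t)local_port;
+
+	/* IP_PROTO goes in the low byte; the high byte must be zero. */
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST] = ip_proto;
+	inbuf[MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST + 1] = 0;
+}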
+
+/***********************************/
+/* MC_CMD_RESET_SECURITY_RULES
+ * Reset all blacklist and whitelist actions for a particular physical port, or
+ * all ports. (Medford-only; for use by SolarSecure apps, not directly by
+ * drivers. See SF-114946-SW.) NOTE - this message definition is provisional.
+ * It has not yet been used in any released code and may change during
+ * development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_RESET_SECURITY_RULES 0x110
+#undef MC_CMD_0x110_PRIVILEGE_CTG
+
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
+#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
+/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+/* enum: special value to reset all physical ports */
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+
+/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
+#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SECURITY_RULESET_VERSION
+ * Return a large hash value representing a "version" of the complete set of
+ * currently active blacklist / whitelist rules and associated data structures.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111
+#undef MC_CMD_0x111_PRIVILEGE_CTG
+
+#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num))
+/* Opaque hash value; length may vary depending on the hash scheme used */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Free counters previously allocated for use with blacklist / whitelist
+ * rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
+#undef MC_CMD_0x114_PRIVILEGE_CTG
+
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
+/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
+ * to the next node, expressed as an offset in the trie memory (i.e. node ID
+ * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
+ * SUBNET_ID_MIN .. SUBNET_ID_MAX
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */
+#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0
+
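+/* Illustrative sketch, not part of the generated MCDI definitions: encoding
+ * one MC_CMD_SUBNET_MAP_SET_NODE entry. As described above, a non-leaf entry
+ * is the child node ID multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE (a
+ * constant retrieved at runtime via MC_CMD_GET_PARSER_DISP_INFO), while a
+ * leaf entry is the subnet ID itself. The function names are assumptions of
+ * this example.
+ */
+static uint16_t
+example_subnet_map_branch_entry(uint16_t child_node_id,
+    uint16_t entries_per_node)
+{
+	/* Pointer to the next trie node, as an offset in trie memory. */
+	return ((uint16_t)(child_node_id * entries_per_node));
+}
+
+static uint16_t
+example_subnet_map_leaf_entry(uint16_t subnet_id)
+{
+	/* Leaf value in the range SUBNET_ID_MIN .. SUBNET_ID_MAX. */
+	return (subnet_id);
+}
+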
+/* PORTRANGE_TREE_ENTRY structuredef */
+#define PORTRANGE_TREE_ENTRY_LEN 4
+/* key for branch nodes (<= key takes left branch, > key takes right branch),
+ * or magic value for leaf nodes
+ */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
+/* final portrange ID for leaf nodes (don't care for branch nodes) */
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16
+
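+/* Illustrative sketch, not part of the generated MCDI definitions: packing a
+ * PORTRANGE_TREE_ENTRY into its 32-bit layout using the LBN/WIDTH values
+ * above. A branch node carries a comparison key (<= key takes the left
+ * branch); a leaf node uses the LEAF_NODE_KEY magic value together with the
+ * final portrange ID.
+ */
+static uint32_t
+example_portrange_branch_entry(uint16_t key)
+{
+	return ((uint32_t)key << PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN);
+}
+
+static uint32_t
+example_portrange_leaf_entry(uint16_t portrange_id)
+{
+	return (((uint32_t)PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY <<
+	    PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN) |
+	    ((uint32_t)portrange_id <<
+	    PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN));
+}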
+
+/***********************************/
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping local port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
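+/* Illustrative sketch, not part of the generated MCDI definitions: building a
+ * minimal MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS request that registers the IANA
+ * VXLAN port. Entry packing follows the TUNNEL_ENCAP_UDP_PORT_ENTRY LBN/WIDTH
+ * values; the little-endian encoding and the fixed one-entry request are
+ * assumptions of this example. The returned value is the request length to
+ * pass to the MCDI transport.
+ */
+static size_t
+example_set_vxlan_udp_port_req(uint8_t *inbuf)
+{
+	uint32_t entry;
+	size_t ofst;
+
+	/* FLAGS = 0 (not unloading), NUM_ENTRIES = 1; both are 16-bit fields. */
+	inbuf[MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST] = 0;
+	inbuf[MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST + 1] = 0;
+	inbuf[MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST] = 1;
+	inbuf[MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST + 1] = 0;
+
+	/* One entry: UDP port in bits 0..15, protocol in bits 16..31. */
+	entry = ((uint32_t)TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT <<
+	    TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN) |
+	    ((uint32_t)TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN <<
+	    TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN);
+
+	ofst = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST;
+	inbuf[ofst] = (uint8_t)entry;
+	inbuf[ofst + 1] = (uint8_t)(entry >> 8);
+	inbuf[ofst + 2] = (uint8_t)(entry >> 16);
+	inbuf[ofst + 3] = (uint8_t)(entry >> 24);
+
+	return (MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(1));
+}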
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute packets across both RX engines.
+ * Packets are distributed based on a table with the destination vFIFO. The
+ * table index is a hash of the IPv4 source and destination addresses and the
+ * VLAN priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+#undef MC_CMD_0x118_PRIVILEGE_CTG
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 16
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+/* The VLAN priority associated to the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+/* The RX engine to which the vFIFO in the table entry will point */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_BIND
+ * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more
+ * information on the binding protocol. This MCDI command is only available
+ * over a TLS secure connection between the TSAN and TSAC, and is not available
+ * to host software. Note - the message definitions that comprise this MCDI
+ * command are deemed provisional. This MCDI command has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_TSA_BIND 0x119
+#undef MC_CMD_0x119_PRIVILEGE_CTG
+
+#define MC_CMD_0x119_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */
+#define MC_CMD_TSA_BIND_IN_LEN 4
+#define MC_CMD_TSA_BIND_IN_OP_OFST 0
+/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for
+ * the network adapter. More specifically, TSAN ID equals the MAC address of
+ * the network adapter. TSAN ID is used as part of the TSAN authentication
+ * protocol. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_ID 0x1
+/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part
+ * of the binding procedure to authorize the binding of an adapter to a TSAID.
+ * Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2
+/* enum: Opcode associated with the propagation of a private key that TSAN uses
+ * as part of post-binding authentication procedure. More specifically, TSAN
+ * uses this key for a signing operation. TSAC uses the counterpart public key
+ * to verify the signature. Note - The post-binding authentication occurs when
+ * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to
+ * SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3
+/* enum: Request an unbinding operation. Note - TSAN clears the binding ticket
+ * from the NVRAM section.
+ */
+#define MC_CMD_TSA_BIND_OP_UNBIND 0x4
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
+ * the nonce every time as part of the TSAN post-binding authentication
+ * procedure when the TSAN-TSAC connection terminates and TSAN needs to
+ * reconnect to the TSAC. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_LEN 16
+
+/* MC_CMD_TSA_BIND_IN_GET_TICKET msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+/* This data blob contains the private key generated by the TSAC. TSAN uses
+ * this key for a signing operation. Note- This private key is used in
+ * conjunction with the post-binding TSAN authentication procedure that occurs
+ * when the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer
+ * to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_LEN 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */
+#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
+
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+/* Rules engine type. Note- The rules engine type allows TSAC to further
+ * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the
+ * proper action accordingly. As an example, TSAC uses the rules engine type to
+ * select the SF key that differs in the case of TSAN vs. NIC Emulator.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4
+/* enum: Hardware rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1
+/* enum: Nic emulator rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_NEMU 0x2
+/* enum: SSFE. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_SSFE 0x3
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_LEN 6
+/* The signature data blob. The signature is computed against the message
+ * formed by TSAN ID concatenated with the NONCE value. Refer to SF-115479-TC
+ * for more information also in respect to the private keys that are used to
+ * sign the message based on TSAN pre/post-binding authentication procedure.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_OFST 14
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM 238
+
+/* MC_CMD_TSA_BIND_OUT_GET_TICKET msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0
+/* The ticket represents the data blob construct that TSAN sends to TSAC as
+ * part of the binding protocol. From the TSAN perspective the ticket is an
+ * opaque construct. For more info refer to SF-115479-TC.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+
+
+/***********************************/
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE
+ * Manage the persistent NVRAM cache of security rules created with
+ * MC_CMD_SET_SECURITY_RULE. Note that the cache is not automatically updated
+ * as rules are added or removed; the active ruleset must be explicitly
+ * committed to the cache. The cache may also be explicitly invalidated,
+ * without affecting the currently active ruleset. When the cache is valid, it
+ * will be loaded at power on or MC reboot, instead of the default ruleset.
+ * Rollback of the currently active ruleset to the cached version (when it is
+ * valid) is also supported. (Medford-only; for use by SolarSecure apps, not
+ * directly by drivers. See SF-114946-SW.) NOTE - this message definition is
+ * provisional. It has not yet been used in any released code and may change
+ * during development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a
+#undef MC_CMD_0x11a_PRIVILEGE_CTG
+
+#define MC_CMD_0x11a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN msgrequest */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4
+/* the operation to perform */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0
+/* enum: reports the ruleset version that is cached in persistent storage but
+ * performs no other action
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
+/* enum: rolls back the active state to the cached version. (May fail with
+ * ENOENT if there is no valid cached version.)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
+/* enum: commits the active state to the persistent cache */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
+/* enum: invalidates the persistent cache without affecting the active state */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX 252
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LEN(num) (4+1*(num))
+/* indicates whether the persistent cache is valid (after completion of the
+ * requested operation in the case of rollback, commit, or invalidate)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0
+/* enum: persistent cache is invalid (the VERSION field will be empty in this
+ * case)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
+/* enum: persistent cache is valid */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
+/* cached ruleset version (after completion of the requested operation, in the
+ * case of rollback, commit, or invalidate) as an opaque hash value in the same
+ * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_OFST 4
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_LEN 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MINNUM 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PRIVATE_APPEND
+ * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
+ * if the tag is already present.
+ */
+#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
+#undef MC_CMD_0x11c_PRIVILEGE_CTG
+
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
+/* The tag to be appended */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+/* The length of the data */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+/* The data to be contained in the TLV structure */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_VERIFY_CONTENTS
+ * Verify that the contents of the XPM memory are correct (Medford only). This
+ * is used during manufacture to check that the XPM memory has been programmed
+ * correctly at ATE.
+ */
+#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
+#undef MC_CMD_0x11b_PRIVILEGE_CTG
+
+#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
+/* Data type to be checked */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
+/* Number of sectors found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+/* Number of bytes found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+/* Length of signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+/* Signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
+
+
+/***********************************/
+/* MC_CMD_SET_EVQ_TMR
+ * Update the timer load, timer reload and timer mode values for a given EVQ.
+ * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
+ * be rounded up to the granularity supported by the hardware, then truncated
+ * to the range supported by the hardware. The resulting value after the
+ * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS
+ * and TMR_RELOAD_ACT_NS).
+ */
+#define MC_CMD_SET_EVQ_TMR 0x120
+#undef MC_CMD_0x120_PRIVILEGE_CTG
+
+#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_EVQ_TMR_IN msgrequest */
+#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
+/* Function-relative queue instance */
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+/* Requested value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+/* Requested value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+
+/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
+#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
+/* Actual value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+/* Actual value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+
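+/*
+ * Illustrative sketch, not part of the upstream header: issuing
+ * MC_CMD_SET_EVQ_TMR and reading back the actual (rounded and truncated)
+ * timer values from the response, kept under "#if 0" so it is documentation
+ * only. mcdi_rpc() is a hypothetical transport call and the 32-bit copies
+ * assume a little-endian host.
+ */
+#if 0
+#include <stdint.h>
+#include <string.h>
+
+static int
+set_evq_timer(uint32_t instance, uint32_t load_ns, uint32_t reload_ns,
+    uint32_t mode, uint32_t *load_act_ns, uint32_t *reload_act_ns)
+{
+	uint8_t req[MC_CMD_SET_EVQ_TMR_IN_LEN];
+	uint8_t resp[MC_CMD_SET_EVQ_TMR_OUT_LEN];
+	int rc;
+
+	memcpy(req + MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST, &instance, 4);
+	memcpy(req + MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST, &load_ns, 4);
+	memcpy(req + MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST,
+	    &reload_ns, 4);
+	memcpy(req + MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST, &mode, 4);
+
+	rc = mcdi_rpc(MC_CMD_SET_EVQ_TMR, req, sizeof(req),
+	    resp, sizeof(resp));
+	if (rc != 0)
+		return rc;
+
+	/* The firmware reports the values it actually programmed. */
+	memcpy(load_act_ns,
+	    resp + MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST, 4);
+	memcpy(reload_act_ns,
+	    resp + MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST, 4);
+	return 0;
+}
+#endif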
+
+/***********************************/
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES
+ * Query properties about the event queue timers.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+#undef MC_CMD_0x122_PRIVILEGE_CTG
+
+#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
+/* Reserved for future use. */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
+ * nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
+ * allowed for timer load/reload counts.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
+ * multiple of this step size will be rounded in an implementation defined
+ * manner.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
+ * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+/* Timer durations requested via MCDI that are not a multiple of this step size
+ * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+/* For timers updated using the bug35388 workaround, this is the time interval
+ * (in nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count. This field is only meaningful if the bug35388 workaround
+ * is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+/* For timers updated using the bug35388 workaround, this is the maximum value
+ * allowed for timer load/reload counts. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+/* For timers updated using the bug35388 workaround, timer load/reload counts
+ * not a multiple of this step size will be rounded in an implementation
+ * defined manner. This field is only meaningful if the bug35388 workaround is
+ * enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+
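+/*
+ * Illustrative sketch, not part of the upstream header: converting a
+ * requested EVQ timer duration in nanoseconds into an EVQ_TMR_REG
+ * load/reload count using the properties reported above, kept under "#if 0"
+ * so it is documentation only. The round-up to STEP and the clamp to
+ * MAX_COUNT follow the field descriptions; the helper itself is an
+ * assumption, not driver API, and expects non-zero ns_per_count and step.
+ */
+#if 0
+#include <stdint.h>
+
+static uint32_t
+evq_timer_ns_to_count(uint32_t ns, uint32_t ns_per_count,
+    uint32_t max_count, uint32_t step)
+{
+	/* Requested duration = count * ns_per_count, so round the count up. */
+	uint32_t count = (ns + ns_per_count - 1) / ns_per_count;
+
+	/*
+	 * Counts that are not a multiple of STEP are rounded by the hardware
+	 * in an implementation defined manner, so round up explicitly here.
+	 */
+	count = ((count + step - 1) / step) * step;
+
+	if (count > max_count)
+		count = max_count;
+
+	return count;
+}
+#endif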
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP
+ * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
+ * unused switch buffers.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
+#undef MC_CMD_0x11d_PRIVILEGE_CTG
+
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+/* Will the common pool be used as TX_vFIFO_ULL (1) */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+/* Number of buffers to reserve for the common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+/* TX datapath to which the Common Pool is connected. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+/* enum: Extracts information from function */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+/* Network port or RX Engine to which the common pool connects. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+/* enum: Extracts information from function */
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
+/* ID of the common pool allocated */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
+ * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
+ * previously allocated common pools.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
+#undef MC_CMD_0x11e_PRIVILEGE_CTG
+
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
+/* Common pool previously allocated with which the new vFIFO will be associated
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+/* Port or RX engine to associate the vFIFO egress */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+/* enum: Extracts information from common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+/* Minimum number of buffers that the pool must have */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+/* enum: Do not check the space available */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+/* Will the vFIFO be used as TX_vFIFO_ULL */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+/* Network priority of the vFIFO, if applicable */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+/* enum: Search for the lowest unused priority */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
+/* Short vFIFO ID */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+/* Network priority of the vFIFO */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+
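+/*
+ * Illustrative sketch, not part of the upstream header: the two-step
+ * TX_vFIFO_ULL allocation order implied above, i.e. allocate a common pool
+ * first, then a vFIFO tied to the returned pool ID. Kept under "#if 0" so it
+ * is documentation only. mcdi_rpc() is a hypothetical transport call, the
+ * 32-bit stores assume a little-endian host, and the vFIFO MODE value of 1
+ * is assumed to mirror the common pool ENABLED enum.
+ */
+#if 0
+#include <stdint.h>
+#include <string.h>
+
+static int
+tx_vfifo_ull_setup(uint32_t instance, uint32_t nbufs, uint32_t *vidp)
+{
+	uint8_t cp_in[MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN] = { 0 };
+	uint8_t cp_out[MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN];
+	uint8_t vf_in[MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN] = { 0 };
+	uint8_t vf_out[MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN];
+	uint32_t val;
+	int rc;
+
+	/* Step 1: allocate the common pool from unused switch buffers. */
+	memcpy(cp_in + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST,
+	    &instance, 4);
+	val = MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED;
+	memcpy(cp_in + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST, &val, 4);
+	memcpy(cp_in + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST, &nbufs, 4);
+	val = (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE;
+	memcpy(cp_in + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST, &val, 4);
+	memcpy(cp_in + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST, &val, 4);
+
+	rc = mcdi_rpc(MC_CMD_ALLOCATE_TX_VFIFO_CP, cp_in, sizeof(cp_in),
+	    cp_out, sizeof(cp_out));
+	if (rc != 0)
+		return rc;
+
+	/*
+	 * Step 2: allocate a vFIFO bound to the returned common pool.
+	 * SIZE stays 0 (NO_MINIMUM): do not check the space available.
+	 */
+	memcpy(vf_in + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST,
+	    cp_out + MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST, 4);
+	val = (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE;
+	memcpy(vf_in + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST, &val, 4);
+	val = 1; /* assumed: 1 selects TX_vFIFO_ULL for the MODE field */
+	memcpy(vf_in + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST, &val, 4);
+	val = (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE;
+	memcpy(vf_in + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST,
+	    &val, 4);
+
+	rc = mcdi_rpc(MC_CMD_ALLOCATE_TX_VFIFO_VFIFO, vf_in, sizeof(vf_in),
+	    vf_out, sizeof(vf_out));
+	if (rc == 0)
+		memcpy(vidp,
+		    vf_out + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST, 4);
+
+	return rc;
+}
+#endif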
+
+/***********************************/
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF
+ * This interface clears the configuration of the given vFIFO and leaves it
+ * ready to be re-used.
+ */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
+#undef MC_CMD_0x11f_PRIVILEGE_CTG
+
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
+/* Short vFIFO ID */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
+ * This interface clears the configuration of the given common pool and leaves
+ * it ready to be re-used.
+ */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
+#undef MC_CMD_0x121_PRIVILEGE_CTG
+
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
+/* Common pool ID given when pool allocated */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_REKEY
+ * This request causes the NIC to generate a new per-NIC key and program it
+ * into the write-once memory. During the process all flash partitions that are
+ * protected with a CMAC are verified with the old per-NIC key and then signed
+ * with the new per-NIC key. If the NIC has already reached its rekey limit the
+ * REKEY op will return MC_CMD_ERR_ERANGE. The REKEY op may block until
+ * completion, or it may return 0 and continue processing; therefore the caller
+ * must poll at least once to confirm that the rekeying has completed. The POLL
+ * operation returns MC_CMD_ERR_EBUSY if the rekey process is still running;
+ * otherwise it returns the result of the last completed rekey operation,
+ * or 0 if there has not been a previous rekey.
+ */
+#define MC_CMD_REKEY 0x123
+#undef MC_CMD_0x123_PRIVILEGE_CTG
+
+#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REKEY_IN msgrequest */
+#define MC_CMD_REKEY_IN_LEN 4
+/* the type of operation requested */
+#define MC_CMD_REKEY_IN_OP_OFST 0
+/* enum: Start the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_REKEY 0x0
+/* enum: Poll for completion of the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_POLL 0x1
+
+/* MC_CMD_REKEY_OUT msgresponse */
+#define MC_CMD_REKEY_OUT_LEN 0
+
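+/*
+ * Illustrative sketch, not part of the upstream header: the start/poll
+ * protocol described above for MC_CMD_REKEY, kept under "#if 0" so it is
+ * documentation only. mcdi_rpc() and msleep() are hypothetical placeholders
+ * for the caller's MCDI transport and delay primitives, and passing &op as
+ * the payload assumes a little-endian host.
+ */
+#if 0
+#include <stdint.h>
+
+static int
+rekey_and_wait(void)
+{
+	uint32_t op = MC_CMD_REKEY_IN_OP_REKEY;
+	int rc;
+
+	/*
+	 * Kick off the rekey; it may complete synchronously, or return 0 and
+	 * keep running in the background.
+	 */
+	rc = mcdi_rpc(MC_CMD_REKEY, &op, MC_CMD_REKEY_IN_LEN, NULL, 0);
+	if (rc != 0)
+		return rc;
+
+	/* Poll until the firmware stops reporting EBUSY. */
+	do {
+		op = MC_CMD_REKEY_IN_OP_POLL;
+		rc = mcdi_rpc(MC_CMD_REKEY, &op, MC_CMD_REKEY_IN_LEN, NULL, 0);
+		if (rc == MC_CMD_ERR_EBUSY)
+			msleep(100);
+	} while (rc == MC_CMD_ERR_EBUSY);
+
+	return rc;
+}
+#endif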
+
+/***********************************/
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
+ * This interface allows the host to find out how many common pool buffers are
+ * not yet assigned.
+ */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
+#undef MC_CMD_0x124_PRIVILEGE_CTG
+
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
+/* Available buffers for the ENG to NET vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_FUSES
+ * Change the security level of the adapter by setting bits in the write-once
+ * memory. The firmware maps each flag in the message to a set of one or more
+ * hardware-defined or software-defined bits and sets these bits in the write-
+ * once memory. For Medford the hardware-defined bits are defined in
+ * SF-112079-PS 5.3, the software-defined bits are defined in xpm.h. Returns 0
+ * if all of the required bits were set and returns MC_CMD_ERR_EIO if any of
+ * the required bits were not set.
+ */
+#define MC_CMD_SET_SECURITY_FUSES 0x126
+#undef MC_CMD_0x126_PRIVILEGE_CTG
+
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
+#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
+/* Flags specifying what type of security features are being set */
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+
+/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
+
+#endif /* _SIENA_MC_DRIVER_PCOL_H */
+/*! \cidoxg_end */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_pci.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_pci.h
new file mode 100644
index 00000000..f90f9565
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_regs_pci.h
@@ -0,0 +1,2356 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_PCI_H
+#define _SYS_EFX_REGS_PCI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PC_VEND_ID_REG(16bit):
+ * Vendor ID register
+ */
+
+#define PCR_AZ_VEND_ID_REG 0x00000000
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VEND_ID_LBN 0
+#define PCRF_AZ_VEND_ID_WIDTH 16
+
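+/*
+ * Illustrative sketch, not part of the upstream header: the PCRF_*_LBN /
+ * PCRF_*_WIDTH pairs in this file describe register fields as a lowest bit
+ * number plus a width in bits. A minimal, hypothetical helper for extracting
+ * such a field from a config space dword might look like this (kept under
+ * "#if 0" so it is documentation only); pcr_field_get() is an assumption,
+ * not an API provided by this driver.
+ */
+#if 0
+#include <stdint.h>
+
+static inline uint32_t
+pcr_field_get(uint32_t reg_val, unsigned int lbn, unsigned int width)
+{
+	/* Build a mask of 'width' bits and shift the field down from 'lbn'. */
+	uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);
+
+	return (reg_val >> lbn) & mask;
+}
+
+/*
+ * Usage (hypothetical 'dword0' read from config space offset 0):
+ *   vendor_id = pcr_field_get(dword0, PCRF_AZ_VEND_ID_LBN,
+ *       PCRF_AZ_VEND_ID_WIDTH);
+ */
+#endif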
+
+/*
+ * PC_DEV_ID_REG(16bit):
+ * Device ID register
+ */
+
+#define PCR_AZ_DEV_ID_REG 0x00000002
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DEV_ID_LBN 0
+#define PCRF_AZ_DEV_ID_WIDTH 16
+
+
+/*
+ * PC_CMD_REG(16bit):
+ * Command register
+ */
+
+#define PCR_AZ_CMD_REG 0x00000004
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INTX_DIS_LBN 10
+#define PCRF_AZ_INTX_DIS_WIDTH 1
+#define PCRF_AZ_FB2B_EN_LBN 9
+#define PCRF_AZ_FB2B_EN_WIDTH 1
+#define PCRF_AZ_SERR_EN_LBN 8
+#define PCRF_AZ_SERR_EN_WIDTH 1
+#define PCRF_AZ_IDSEL_CTL_LBN 7
+#define PCRF_AZ_IDSEL_CTL_WIDTH 1
+#define PCRF_AZ_PERR_EN_LBN 6
+#define PCRF_AZ_PERR_EN_WIDTH 1
+#define PCRF_AZ_VGA_PAL_SNP_LBN 5
+#define PCRF_AZ_VGA_PAL_SNP_WIDTH 1
+#define PCRF_AZ_MWI_EN_LBN 4
+#define PCRF_AZ_MWI_EN_WIDTH 1
+#define PCRF_AZ_SPEC_CYC_LBN 3
+#define PCRF_AZ_SPEC_CYC_WIDTH 1
+#define PCRF_AZ_MST_EN_LBN 2
+#define PCRF_AZ_MST_EN_WIDTH 1
+#define PCRF_AZ_MEM_EN_LBN 1
+#define PCRF_AZ_MEM_EN_WIDTH 1
+#define PCRF_AZ_IO_EN_LBN 0
+#define PCRF_AZ_IO_EN_WIDTH 1
+
+
+/*
+ * PC_STAT_REG(16bit):
+ * Status register
+ */
+
+#define PCR_AZ_STAT_REG 0x00000006
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DET_PERR_LBN 15
+#define PCRF_AZ_DET_PERR_WIDTH 1
+#define PCRF_AZ_SIG_SERR_LBN 14
+#define PCRF_AZ_SIG_SERR_WIDTH 1
+#define PCRF_AZ_GOT_MABRT_LBN 13
+#define PCRF_AZ_GOT_MABRT_WIDTH 1
+#define PCRF_AZ_GOT_TABRT_LBN 12
+#define PCRF_AZ_GOT_TABRT_WIDTH 1
+#define PCRF_AZ_SIG_TABRT_LBN 11
+#define PCRF_AZ_SIG_TABRT_WIDTH 1
+#define PCRF_AZ_DEVSEL_TIM_LBN 9
+#define PCRF_AZ_DEVSEL_TIM_WIDTH 2
+#define PCRF_AZ_MDAT_PERR_LBN 8
+#define PCRF_AZ_MDAT_PERR_WIDTH 1
+#define PCRF_AZ_FB2B_CAP_LBN 7
+#define PCRF_AZ_FB2B_CAP_WIDTH 1
+#define PCRF_AZ_66MHZ_CAP_LBN 5
+#define PCRF_AZ_66MHZ_CAP_WIDTH 1
+#define PCRF_AZ_CAP_LIST_LBN 4
+#define PCRF_AZ_CAP_LIST_WIDTH 1
+#define PCRF_AZ_INTX_STAT_LBN 3
+#define PCRF_AZ_INTX_STAT_WIDTH 1
+
+
+/*
+ * PC_REV_ID_REG(8bit):
+ * Class code & revision ID register
+ */
+
+#define PCR_AZ_REV_ID_REG 0x00000008
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_REV_ID_LBN 0
+#define PCRF_AZ_REV_ID_WIDTH 8
+
+
+/*
+ * PC_CC_REG(24bit):
+ * Class code register
+ */
+
+#define PCR_AZ_CC_REG 0x00000009
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BASE_CC_LBN 16
+#define PCRF_AZ_BASE_CC_WIDTH 8
+#define PCRF_AZ_SUB_CC_LBN 8
+#define PCRF_AZ_SUB_CC_WIDTH 8
+#define PCRF_AZ_PROG_IF_LBN 0
+#define PCRF_AZ_PROG_IF_WIDTH 8
+
+
+/*
+ * PC_CACHE_LSIZE_REG(8bit):
+ * Cache line size
+ */
+
+#define PCR_AZ_CACHE_LSIZE_REG 0x0000000c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CACHE_LSIZE_LBN 0
+#define PCRF_AZ_CACHE_LSIZE_WIDTH 8
+
+
+/*
+ * PC_MST_LAT_REG(8bit):
+ * Master latency timer register
+ */
+
+#define PCR_AZ_MST_LAT_REG 0x0000000d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MST_LAT_LBN 0
+#define PCRF_AZ_MST_LAT_WIDTH 8
+
+
+/*
+ * PC_HDR_TYPE_REG(8bit):
+ * Header type register
+ */
+
+#define PCR_AZ_HDR_TYPE_REG 0x0000000e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MULT_FUNC_LBN 7
+#define PCRF_AZ_MULT_FUNC_WIDTH 1
+#define PCRF_AZ_TYPE_LBN 0
+#define PCRF_AZ_TYPE_WIDTH 7
+
+
+/*
+ * PC_BIST_REG(8bit):
+ * BIST register
+ */
+
+#define PCR_AZ_BIST_REG 0x0000000f
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BIST_LBN 0
+#define PCRF_AZ_BIST_WIDTH 8
+
+
+/*
+ * PC_BAR0_REG(32bit):
+ * Primary function base address register 0
+ */
+
+#define PCR_AZ_BAR0_REG 0x00000010
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR0_LBN 4
+#define PCRF_AZ_BAR0_WIDTH 28
+#define PCRF_AZ_BAR0_PREF_LBN 3
+#define PCRF_AZ_BAR0_PREF_WIDTH 1
+#define PCRF_AZ_BAR0_TYPE_LBN 1
+#define PCRF_AZ_BAR0_TYPE_WIDTH 2
+#define PCRF_AZ_BAR0_IOM_LBN 0
+#define PCRF_AZ_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR1_REG(32bit):
+ * Primary function base address register 1. BAR1 is not implemented, so this register is read-only.
+ */
+
+#define PCR_DZ_BAR1_REG 0x00000014
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_BAR1_LBN 0
+#define PCRF_DZ_BAR1_WIDTH 32
+
+
+/*
+ * PC_BAR2_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_AZ_BAR2_LO_REG 0x00000018
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_LO_LBN 4
+#define PCRF_AZ_BAR2_LO_WIDTH 28
+#define PCRF_AZ_BAR2_PREF_LBN 3
+#define PCRF_AZ_BAR2_PREF_WIDTH 1
+#define PCRF_AZ_BAR2_TYPE_LBN 1
+#define PCRF_AZ_BAR2_TYPE_WIDTH 2
+#define PCRF_AZ_BAR2_IOM_LBN 0
+#define PCRF_AZ_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR2_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_AZ_BAR2_HI_REG 0x0000001c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_HI_LBN 0
+#define PCRF_AZ_BAR2_HI_WIDTH 32
+
+
+/*
+ * PC_BAR4_LO_REG(32bit):
+ * Primary function base address register 4 low bits
+ */
+
+#define PCR_CZ_BAR4_LO_REG 0x00000020
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_LO_LBN 4
+#define PCRF_CZ_BAR4_LO_WIDTH 28
+#define PCRF_CZ_BAR4_PREF_LBN 3
+#define PCRF_CZ_BAR4_PREF_WIDTH 1
+#define PCRF_CZ_BAR4_TYPE_LBN 1
+#define PCRF_CZ_BAR4_TYPE_WIDTH 2
+#define PCRF_CZ_BAR4_IOM_LBN 0
+#define PCRF_CZ_BAR4_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR4_HI_REG(32bit):
+ * Primary function base address register 4 high bits
+ */
+
+#define PCR_CZ_BAR4_HI_REG 0x00000024
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_HI_LBN 0
+#define PCRF_CZ_BAR4_HI_WIDTH 32
+
+
+/*
+ * PC_SS_VEND_ID_REG(16bit):
+ * Sub-system vendor ID register
+ */
+
+#define PCR_AZ_SS_VEND_ID_REG 0x0000002c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_VEND_ID_LBN 0
+#define PCRF_AZ_SS_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_SS_ID_REG(16bit):
+ * Sub-system ID register
+ */
+
+#define PCR_AZ_SS_ID_REG 0x0000002e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_ID_LBN 0
+#define PCRF_AZ_SS_ID_WIDTH 16
+
+
+/*
+ * PC_EXPROM_BAR_REG(32bit):
+ * Expansion ROM base address register
+ */
+
+#define PCR_AZ_EXPROM_BAR_REG 0x00000030
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXPROM_BAR_LBN 11
+#define PCRF_AZ_EXPROM_BAR_WIDTH 21
+#define PCRF_AB_EXPROM_MIN_SIZE_LBN 2
+#define PCRF_AB_EXPROM_MIN_SIZE_WIDTH 9
+#define PCRF_CZ_EXPROM_MIN_SIZE_LBN 1
+#define PCRF_CZ_EXPROM_MIN_SIZE_WIDTH 10
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_LBN 1
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_WIDTH 1
+#define PCRF_AZ_EXPROM_EN_LBN 0
+#define PCRF_AZ_EXPROM_EN_WIDTH 1
+
+
+/*
+ * PC_CAP_PTR_REG(8bit):
+ * Capability pointer register
+ */
+
+#define PCR_AZ_CAP_PTR_REG 0x00000034
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CAP_PTR_LBN 0
+#define PCRF_AZ_CAP_PTR_WIDTH 8
+
+
+/*
+ * PC_INT_LINE_REG(8bit):
+ * Interrupt line register
+ */
+
+#define PCR_AZ_INT_LINE_REG 0x0000003c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_LINE_LBN 0
+#define PCRF_AZ_INT_LINE_WIDTH 8
+
+
+/*
+ * PC_INT_PIN_REG(8bit):
+ * Interrupt pin register
+ */
+
+#define PCR_AZ_INT_PIN_REG 0x0000003d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_PIN_LBN 0
+#define PCRF_AZ_INT_PIN_WIDTH 8
+#define PCFE_DZ_INTPIN_INTD 4
+#define PCFE_DZ_INTPIN_INTC 3
+#define PCFE_DZ_INTPIN_INTB 2
+#define PCFE_DZ_INTPIN_INTA 1
+
+
+/*
+ * PC_PM_CAP_ID_REG(8bit):
+ * Power management capability ID
+ */
+
+#define PCR_AZ_PM_CAP_ID_REG 0x00000040
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_CAP_ID_LBN 0
+#define PCRF_AZ_PM_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PM_NXT_PTR_REG(8bit):
+ * Power management next item pointer
+ */
+
+#define PCR_AZ_PM_NXT_PTR_REG 0x00000041
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_NXT_PTR_LBN 0
+#define PCRF_AZ_PM_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_PM_CAP_REG(16bit):
+ * Power management capabilities register
+ */
+
+#define PCR_AZ_PM_CAP_REG 0x00000042
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_SUPT_LBN 11
+#define PCRF_AZ_PM_PME_SUPT_WIDTH 5
+#define PCRF_AZ_PM_D2_SUPT_LBN 10
+#define PCRF_AZ_PM_D2_SUPT_WIDTH 1
+#define PCRF_AZ_PM_D1_SUPT_LBN 9
+#define PCRF_AZ_PM_D1_SUPT_WIDTH 1
+#define PCRF_AZ_PM_AUX_CURR_LBN 6
+#define PCRF_AZ_PM_AUX_CURR_WIDTH 3
+#define PCRF_AZ_PM_DSI_LBN 5
+#define PCRF_AZ_PM_DSI_WIDTH 1
+#define PCRF_AZ_PM_PME_CLK_LBN 3
+#define PCRF_AZ_PM_PME_CLK_WIDTH 1
+#define PCRF_AZ_PM_PME_VER_LBN 0
+#define PCRF_AZ_PM_PME_VER_WIDTH 3
+
+
+/*
+ * PC_PM_CS_REG(16bit):
+ * Power management control & status register
+ */
+
+#define PCR_AZ_PM_CS_REG 0x00000044
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_STAT_LBN 15
+#define PCRF_AZ_PM_PME_STAT_WIDTH 1
+#define PCRF_AZ_PM_DAT_SCALE_LBN 13
+#define PCRF_AZ_PM_DAT_SCALE_WIDTH 2
+#define PCRF_AZ_PM_DAT_SEL_LBN 9
+#define PCRF_AZ_PM_DAT_SEL_WIDTH 4
+#define PCRF_AZ_PM_PME_EN_LBN 8
+#define PCRF_AZ_PM_PME_EN_WIDTH 1
+#define PCRF_CZ_NO_SOFT_RESET_LBN 3
+#define PCRF_CZ_NO_SOFT_RESET_WIDTH 1
+#define PCRF_AZ_PM_PWR_ST_LBN 0
+#define PCRF_AZ_PM_PWR_ST_WIDTH 2
+
+
+/*
+ * PC_MSI_CAP_ID_REG(8bit):
+ * MSI capability ID
+ */
+
+#define PCR_AZ_MSI_CAP_ID_REG 0x00000050
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_CAP_ID_LBN 0
+#define PCRF_AZ_MSI_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSI_NXT_PTR_REG(8bit):
+ * MSI next item pointer
+ */
+
+#define PCR_AZ_MSI_NXT_PTR_REG 0x00000051
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_NXT_PTR_LBN 0
+#define PCRF_AZ_MSI_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSI_CTL_REG(16bit):
+ * MSI control register
+ */
+
+#define PCR_AZ_MSI_CTL_REG 0x00000052
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_64_EN_LBN 7
+#define PCRF_AZ_MSI_64_EN_WIDTH 1
+#define PCRF_AZ_MSI_MULT_MSG_EN_LBN 4
+#define PCRF_AZ_MSI_MULT_MSG_EN_WIDTH 3
+#define PCRF_AZ_MSI_MULT_MSG_CAP_LBN 1
+#define PCRF_AZ_MSI_MULT_MSG_CAP_WIDTH 3
+#define PCRF_AZ_MSI_EN_LBN 0
+#define PCRF_AZ_MSI_EN_WIDTH 1
+
+
+/*
+ * PC_MSI_ADR_LO_REG(32bit):
+ * MSI low 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_LO_REG 0x00000054
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_LO_LBN 2
+#define PCRF_AZ_MSI_ADR_LO_WIDTH 30
+
+
+/*
+ * PC_MSI_ADR_HI_REG(32bit):
+ * MSI high 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_HI_REG 0x00000058
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_HI_LBN 0
+#define PCRF_AZ_MSI_ADR_HI_WIDTH 32
+
+
+/*
+ * PC_MSI_DAT_REG(16bit):
+ * MSI data register
+ */
+
+#define PCR_AZ_MSI_DAT_REG 0x0000005c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_DAT_LBN 0
+#define PCRF_AZ_MSI_DAT_WIDTH 16
+
+
+/*
+ * PC_PCIE_CAP_LIST_REG(16bit):
+ * PCIe capability list register
+ */
+
+#define PCR_AB_PCIE_CAP_LIST_REG 0x00000060
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_LIST_REG 0x00000070
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_NXT_PTR_LBN 8
+#define PCRF_AZ_PCIE_NXT_PTR_WIDTH 8
+#define PCRF_AZ_PCIE_CAP_ID_LBN 0
+#define PCRF_AZ_PCIE_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PCIE_CAP_REG(16bit):
+ * PCIe capability register
+ */
+
+#define PCR_AB_PCIE_CAP_REG 0x00000062
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_REG 0x00000072
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_INT_MSG_NUM_LBN 9
+#define PCRF_AZ_PCIE_INT_MSG_NUM_WIDTH 5
+#define PCRF_AZ_PCIE_SLOT_IMP_LBN 8
+#define PCRF_AZ_PCIE_SLOT_IMP_WIDTH 1
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_LBN 4
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_WIDTH 4
+#define PCRF_AZ_PCIE_CAP_VER_LBN 0
+#define PCRF_AZ_PCIE_CAP_VER_WIDTH 4
+
+
+/*
+ * PC_DEV_CAP_REG(32bit):
+ * PCIe device capabilities register
+ */
+
+#define PCR_AB_DEV_CAP_REG 0x00000064
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CAP_REG 0x00000074
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_LBN 28
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_LBN 26
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_WIDTH 2
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_LBN 18
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_WIDTH 8
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_LBN 15
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_WIDTH 1
+#define PCRF_AB_PWR_IND_LBN 14
+#define PCRF_AB_PWR_IND_WIDTH 1
+#define PCRF_AB_ATTN_IND_LBN 13
+#define PCRF_AB_ATTN_IND_WIDTH 1
+#define PCRF_AB_ATTN_BUTTON_LBN 12
+#define PCRF_AB_ATTN_BUTTON_WIDTH 1
+#define PCRF_AZ_ENDPT_L1_LAT_LBN 9
+#define PCRF_AZ_ENDPT_L1_LAT_WIDTH 3
+#define PCRF_AZ_ENDPT_L0_LAT_LBN 6
+#define PCRF_AZ_ENDPT_L0_LAT_WIDTH 3
+#define PCRF_AZ_TAG_FIELD_LBN 5
+#define PCRF_AZ_TAG_FIELD_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_LBN 3
+#define PCRF_AZ_PHAN_FUNC_WIDTH 2
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_LBN 0
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_WIDTH 3
+
+
+/*
+ * PC_DEV_CTL_REG(16bit):
+ * PCIe device control register
+ */
+
+#define PCR_AB_DEV_CTL_REG 0x00000068
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CTL_REG 0x00000078
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_FN_LEVEL_RESET_LBN 15
+#define PCRF_CZ_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_MAX_RD_REQ_SIZE_LBN 12
+#define PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_4096 5
+#define PCFE_AZ_MAX_RD_REQ_SIZE_2048 4
+#define PCFE_AZ_MAX_RD_REQ_SIZE_1024 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_512 2
+#define PCFE_AZ_MAX_RD_REQ_SIZE_256 1
+#define PCFE_AZ_MAX_RD_REQ_SIZE_128 0
+#define PCRF_AZ_EN_NO_SNOOP_LBN 11
+#define PCRF_AZ_EN_NO_SNOOP_WIDTH 1
+#define PCRF_AZ_AUX_PWR_PM_EN_LBN 10
+#define PCRF_AZ_AUX_PWR_PM_EN_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_EN_LBN 9
+#define PCRF_AZ_PHAN_FUNC_EN_WIDTH 1
+#define PCRF_AB_DEV_CAP_REG_RSVD0_LBN 8
+#define PCRF_AB_DEV_CAP_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_EXTENDED_TAG_EN_LBN 8
+#define PCRF_CZ_EXTENDED_TAG_EN_WIDTH 1
+#define PCRF_AZ_MAX_PAYL_SIZE_LBN 5
+#define PCRF_AZ_MAX_PAYL_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_PAYL_SIZE_4096 5
+#define PCFE_AZ_MAX_PAYL_SIZE_2048 4
+#define PCFE_AZ_MAX_PAYL_SIZE_1024 3
+#define PCFE_AZ_MAX_PAYL_SIZE_512 2
+#define PCFE_AZ_MAX_PAYL_SIZE_256 1
+#define PCFE_AZ_MAX_PAYL_SIZE_128 0
+#define PCRF_AZ_EN_RELAX_ORDER_LBN 4
+#define PCRF_AZ_EN_RELAX_ORDER_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_LBN 3
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_RPT_EN_LBN 2
+#define PCRF_AZ_FATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_CORR_ERR_RPT_EN_LBN 0
+#define PCRF_AZ_CORR_ERR_RPT_EN_WIDTH 1
+
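+/*
+ * Illustrative sketch, not part of the upstream header: the
+ * PCFE_AZ_MAX_RD_REQ_SIZE_* and PCFE_AZ_MAX_PAYL_SIZE_* encodings above
+ * follow the usual PCIe rule that the encoded value n means 128 << n bytes.
+ * Kept under "#if 0" so it is documentation only.
+ */
+#if 0
+#include <stdint.h>
+
+static inline uint32_t
+pcie_size_decode(uint32_t encoded)
+{
+	/* 0 -> 128, 1 -> 256, ..., 5 -> 4096 bytes. */
+	return 128u << encoded;
+}
+#endif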
+
+/*
+ * PC_DEV_STAT_REG(16bit):
+ * PCIe device status register
+ */
+
+#define PCR_AB_DEV_STAT_REG 0x0000006a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_STAT_REG 0x0000007a
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_TRNS_PEND_LBN 5
+#define PCRF_AZ_TRNS_PEND_WIDTH 1
+#define PCRF_AZ_AUX_PWR_DET_LBN 4
+#define PCRF_AZ_AUX_PWR_DET_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_DET_LBN 3
+#define PCRF_AZ_UNSUP_REQ_DET_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_DET_LBN 2
+#define PCRF_AZ_FATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_DET_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_CORR_ERR_DET_LBN 0
+#define PCRF_AZ_CORR_ERR_DET_WIDTH 1
+
+
+/*
+ * PC_LNK_CAP_REG(32bit):
+ * PCIe link capabilities register
+ */
+
+#define PCR_AB_LNK_CAP_REG 0x0000006c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CAP_REG 0x0000007c
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PORT_NUM_LBN 24
+#define PCRF_AZ_PORT_NUM_WIDTH 8
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_LBN 22
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_WIDTH 1
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_LBN 21
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_WIDTH 1
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_LBN 20
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_WIDTH 1
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_LBN 19
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_WIDTH 1
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_LBN 18
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_WIDTH 1
+#define PCRF_AZ_DEF_L1_EXIT_LAT_LBN 15
+#define PCRF_AZ_DEF_L1_EXIT_LAT_WIDTH 3
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_LBN 12
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_WIDTH 3
+#define PCRF_AZ_AS_LNK_PM_SUPT_LBN 10
+#define PCRF_AZ_AS_LNK_PM_SUPT_WIDTH 2
+#define PCRF_AZ_MAX_LNK_WIDTH_LBN 4
+#define PCRF_AZ_MAX_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_MAX_LNK_SP_LBN 0
+#define PCRF_AZ_MAX_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_LNK_CTL_REG(16bit):
+ * PCIe link control register
+ */
+
+#define PCR_AB_LNK_CTL_REG 0x00000070
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CTL_REG 0x00000080
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXT_SYNC_LBN 7
+#define PCRF_AZ_EXT_SYNC_WIDTH 1
+#define PCRF_AZ_COMM_CLK_CFG_LBN 6
+#define PCRF_AZ_COMM_CLK_CFG_WIDTH 1
+#define PCRF_AB_LNK_CTL_REG_RSVD0_LBN 5
+#define PCRF_AB_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_LNK_RETRAIN_LBN 5
+#define PCRF_CZ_LNK_RETRAIN_WIDTH 1
+#define PCRF_AZ_LNK_DIS_LBN 4
+#define PCRF_AZ_LNK_DIS_WIDTH 1
+#define PCRF_AZ_RD_COM_BDRY_LBN 3
+#define PCRF_AZ_RD_COM_BDRY_WIDTH 1
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_LBN 0
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_WIDTH 2
+
+
+/*
+ * PC_LNK_STAT_REG(16bit):
+ * PCIe link status register
+ */
+
+#define PCR_AB_LNK_STAT_REG 0x00000072
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_STAT_REG 0x00000082
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SLOT_CLK_CFG_LBN 12
+#define PCRF_AZ_SLOT_CLK_CFG_WIDTH 1
+#define PCRF_AZ_LNK_TRAIN_LBN 11
+#define PCRF_AZ_LNK_TRAIN_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_LBN 10
+#define PCRF_AB_TRAIN_ERR_WIDTH 1
+#define PCRF_AZ_LNK_WIDTH_LBN 4
+#define PCRF_AZ_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_LNK_SP_LBN 0
+#define PCRF_AZ_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_SLOT_CAP_REG(32bit):
+ * PCIe slot capabilities register
+ */
+
+#define PCR_AB_SLOT_CAP_REG 0x00000074
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_NUM_LBN 19
+#define PCRF_AB_SLOT_NUM_WIDTH 13
+#define PCRF_AB_SLOT_PWR_LIM_SCL_LBN 15
+#define PCRF_AB_SLOT_PWR_LIM_SCL_WIDTH 2
+#define PCRF_AB_SLOT_PWR_LIM_VAL_LBN 7
+#define PCRF_AB_SLOT_PWR_LIM_VAL_WIDTH 8
+#define PCRF_AB_SLOT_HP_CAP_LBN 6
+#define PCRF_AB_SLOT_HP_CAP_WIDTH 1
+#define PCRF_AB_SLOT_HP_SURP_LBN 5
+#define PCRF_AB_SLOT_HP_SURP_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_PRST_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_PRST_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_PRST_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_PRST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_LBN 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_WIDTH 1
+
+
+/*
+ * PC_SLOT_CTL_REG(16bit):
+ * PCIe slot control register
+ */
+
+#define PCR_AB_SLOT_CTL_REG 0x00000078
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_LBN 10
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_CTL_LBN 8
+#define PCRF_AB_SLOT_PWR_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_ATT_IND_CTL_LBN 6
+#define PCRF_AB_SLOT_ATT_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_HP_INT_EN_LBN 5
+#define PCRF_AB_SLOT_HP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_LBN 4
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_LBN 3
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_LBN 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_EN_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_EN_WIDTH 1
+
+
+/*
+ * PC_SLOT_STAT_REG(16bit):
+ * PCIe slot status register
+ */
+
+#define PCR_AB_SLOT_STAT_REG 0x0000007a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_PRES_DET_ST_LBN 6
+#define PCRF_AB_PRES_DET_ST_WIDTH 1
+#define PCRF_AB_MRL_SENS_ST_LBN 5
+#define PCRF_AB_MRL_SENS_ST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_WIDTH 1
+#define PCRF_AB_PWR_FLTDET_LBN 1
+#define PCRF_AB_PWR_FLTDET_WIDTH 1
+#define PCRF_AB_ATTN_BUTDET_LBN 0
+#define PCRF_AB_ATTN_BUTDET_WIDTH 1
+
+
+/*
+ * PC_MSIX_CAP_ID_REG(8bit):
+ * MSIX Capability ID
+ */
+
+#define PCR_BB_MSIX_CAP_ID_REG 0x00000090
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CAP_ID_REG 0x000000b0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_CAP_ID_LBN 0
+#define PCRF_BZ_MSIX_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSIX_NXT_PTR_REG(8bit):
+ * MSIX Capability Next Capability Ptr
+ */
+
+#define PCR_BB_MSIX_NXT_PTR_REG 0x00000091
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_NXT_PTR_REG 0x000000b1
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_NXT_PTR_LBN 0
+#define PCRF_BZ_MSIX_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSIX_CTL_REG(16bit):
+ * MSIX control register
+ */
+
+#define PCR_BB_MSIX_CTL_REG 0x00000092
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CTL_REG 0x000000b2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_EN_LBN 15
+#define PCRF_BZ_MSIX_EN_WIDTH 1
+#define PCRF_BZ_MSIX_FUNC_MASK_LBN 14
+#define PCRF_BZ_MSIX_FUNC_MASK_WIDTH 1
+#define PCRF_BZ_MSIX_TBL_SIZE_LBN 0
+#define PCRF_BZ_MSIX_TBL_SIZE_WIDTH 11
+
+
+/*
+ * PC_MSIX_TBL_BASE_REG(32bit):
+ * MSIX Capability Vector Table Base
+ */
+
+#define PCR_BB_MSIX_TBL_BASE_REG 0x00000094
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_TBL_BASE_REG 0x000000b4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_TBL_OFF_LBN 3
+#define PCRF_BZ_MSIX_TBL_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_TBL_BIR_LBN 0
+#define PCRF_BZ_MSIX_TBL_BIR_WIDTH 3
+
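+/*
+ * Illustrative sketch, not part of the upstream header: locating the MSI-X
+ * vector table from the register above, kept under "#if 0" so it is
+ * documentation only. The BIR field selects which BAR the table lives in and
+ * the offset field gives its 8-byte aligned offset within that BAR;
+ * bar_base[] is a hypothetical array of mapped BAR addresses.
+ */
+#if 0
+#include <stdint.h>
+
+static volatile void *
+msix_table_address(uint32_t tbl_base_reg, volatile uint8_t *bar_base[])
+{
+	uint32_t bir = (tbl_base_reg >> PCRF_BZ_MSIX_TBL_BIR_LBN) &
+	    ((1u << PCRF_BZ_MSIX_TBL_BIR_WIDTH) - 1u);
+	/*
+	 * The offset field occupies bits 31:3, so the byte offset is simply
+	 * the register value with the BIR bits masked off.
+	 */
+	uint32_t offset =
+	    tbl_base_reg & ~((1u << PCRF_BZ_MSIX_TBL_OFF_LBN) - 1u);
+
+	return bar_base[bir] + offset;
+}
+#endif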
+
+/*
+ * PC_DEV_CAP2_REG(32bit):
+ * PCIe Device Capabilities 2
+ */
+
+#define PCR_CZ_DEV_CAP2_REG 0x00000094
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_SUPPORTED_LBN 18
+#define PCRF_DZ_OBFF_SUPPORTED_WIDTH 2
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_LBN 12
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_WIDTH 2
+#define PCRF_DZ_LTR_M_SUPPORTED_LBN 11
+#define PCRF_DZ_LTR_M_SUPPORTED_WIDTH 1
+#define PCRF_CC_CMPL_TIMEOUT_DIS_LBN 4
+#define PCRF_CC_CMPL_TIMEOUT_DIS_WIDTH 1
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_LBN 4
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_WIDTH 4
+#define PCFE_CZ_CMPL_TIMEOUT_17000_TO_6400MS 14
+#define PCFE_CZ_CMPL_TIMEOUT_4000_TO_1300MS 13
+#define PCFE_CZ_CMPL_TIMEOUT_1000_TO_3500MS 10
+#define PCFE_CZ_CMPL_TIMEOUT_260_TO_900MS 9
+#define PCFE_CZ_CMPL_TIMEOUT_65_TO_210MS 6
+#define PCFE_CZ_CMPL_TIMEOUT_16_TO_55MS 5
+#define PCFE_CZ_CMPL_TIMEOUT_1_TO_10MS 2
+#define PCFE_CZ_CMPL_TIMEOUT_50_TO_100US 1
+#define PCFE_CZ_CMPL_TIMEOUT_DEFAULT 0
+
+
+/*
+ * PC_DEV_CTL2_REG(16bit):
+ * PCIe Device Control 2
+ */
+
+#define PCR_CZ_DEV_CTL2_REG 0x00000098
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_ENABLE_LBN 13
+#define PCRF_DZ_OBFF_ENABLE_WIDTH 2
+#define PCRF_DZ_LTR_ENABLE_LBN 10
+#define PCRF_DZ_LTR_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_LBN 9
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_REQUEST_ENABLE_LBN 8
+#define PCRF_DZ_IDO_REQUEST_ENABLE_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_LBN 4
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_WIDTH 4
+
+
+/*
+ * PC_MSIX_PBA_BASE_REG(32bit):
+ * MSIX Capability PBA Base
+ */
+
+#define PCR_BB_MSIX_PBA_BASE_REG 0x00000098
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_PBA_BASE_REG 0x000000b8
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_PBA_OFF_LBN 3
+#define PCRF_BZ_MSIX_PBA_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_PBA_BIR_LBN 0
+#define PCRF_BZ_MSIX_PBA_BIR_WIDTH 3
+
+
+/*
+ * PC_LNK_CAP2_REG(32bit):
+ * PCIe Link Capability 2
+ */
+
+#define PCR_DZ_LNK_CAP2_REG 0x0000009c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LNK_SPEED_SUP_LBN 1
+#define PCRF_DZ_LNK_SPEED_SUP_WIDTH 7
+
+
+/*
+ * PC_LNK_CTL2_REG(16bit):
+ * PCIe Link Control 2
+ */
+
+#define PCR_CZ_LNK_CTL2_REG 0x000000a0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_POLLING_DEEMPH_LVL_LBN 12
+#define PCRF_CZ_POLLING_DEEMPH_LVL_WIDTH 1
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_LBN 11
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_WIDTH 1
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_LBN 10
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TRANSMIT_MARGIN_LBN 7
+#define PCRF_CZ_TRANSMIT_MARGIN_WIDTH 3
+#define PCRF_CZ_SELECT_DEEMPH_LBN 6
+#define PCRF_CZ_SELECT_DEEMPH_WIDTH 1
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_LBN 5
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_WIDTH 1
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_LBN 4
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_LBN 0
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_WIDTH 4
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN3 3
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN2 2
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN1 1
+
+
+/*
+ * PC_LNK_STAT2_REG(16bit):
+ * PCIe Link Status 2
+ */
+
+#define PCR_CZ_LNK_STAT2_REG 0x000000a2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CURRENT_DEEMPH_LBN 0
+#define PCRF_CZ_CURRENT_DEEMPH_WIDTH 1
+
+
+/*
+ * PC_VPD_CAP_ID_REG(8bit):
+ * VPD capability ID
+ */
+
+#define PCR_AB_VPD_CAP_ID_REG 0x000000b0
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_CAP_ID_LBN 0
+#define PCRF_AB_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_VPD_NXT_PTR_REG(8bit):
+ * VPD next item pointer
+ */
+
+#define PCR_AB_VPD_NXT_PTR_REG 0x000000b1
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_NXT_PTR_LBN 0
+#define PCRF_AB_VPD_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_VPD_ADDR_REG(16bit):
+ * VPD address register
+ */
+
+#define PCR_AB_VPD_ADDR_REG 0x000000b2
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_FLAG_LBN 15
+#define PCRF_AB_VPD_FLAG_WIDTH 1
+#define PCRF_AB_VPD_ADDR_LBN 0
+#define PCRF_AB_VPD_ADDR_WIDTH 15
+
+
+/*
+ * PC_VPD_CAP_DATA_REG(32bit):
+ * VPD capability data register
+ */
+
+#define PCR_AB_VPD_CAP_DATA_REG 0x000000b4
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_VPD_CAP_DATA_REG 0x000000d4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VPD_DATA_LBN 0
+#define PCRF_AZ_VPD_DATA_WIDTH 32
+
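+/*
+ * Illustrative sketch, not part of the upstream header: the standard PCI VPD
+ * read handshake expressed with the Falcon A/B register and field names
+ * above, kept under "#if 0" so it is documentation only. The handshake is
+ * the one defined by the PCI specification (write the address with the flag
+ * clear, poll until the flag reads back set, then read the data register);
+ * pci_cfg_write16()/pci_cfg_read16()/pci_cfg_read32() are hypothetical
+ * config-space accessors, not APIs provided by this driver.
+ */
+#if 0
+#include <stdint.h>
+
+static uint32_t
+vpd_read_dword(uint16_t vpd_addr)
+{
+	/* Write the 15-bit VPD address with the flag (bit 15) clear. */
+	pci_cfg_write16(PCR_AB_VPD_ADDR_REG,
+	    vpd_addr & ((1u << PCRF_AB_VPD_ADDR_WIDTH) - 1u));
+
+	/* The device sets the flag once the data register holds the data. */
+	while ((pci_cfg_read16(PCR_AB_VPD_ADDR_REG) &
+	    (1u << PCRF_AB_VPD_FLAG_LBN)) == 0)
+		;	/* a real caller would bound this poll loop */
+
+	return pci_cfg_read32(PCR_AB_VPD_CAP_DATA_REG);
+}
+#endif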
+
+/*
+ * PC_VPD_CAP_CTL_REG(8bit):
+ * VPD control and capabilities register
+ */
+
+#define PCR_CZ_VPD_CAP_CTL_REG 0x000000d0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_VPD_FLAG_LBN 31
+#define PCRF_CZ_VPD_FLAG_WIDTH 1
+#define PCRF_CZ_VPD_ADDR_LBN 16
+#define PCRF_CZ_VPD_ADDR_WIDTH 15
+#define PCRF_CZ_VPD_NXT_PTR_LBN 8
+#define PCRF_CZ_VPD_NXT_PTR_WIDTH 8
+#define PCRF_CZ_VPD_CAP_ID_LBN 0
+#define PCRF_CZ_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_AER_CAP_HDR_REG(32bit):
+ * AER capability header register
+ */
+
+#define PCR_AZ_AER_CAP_HDR_REG 0x00000100
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_LBN 20
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_AZ_AERCAPHDR_VER_LBN 16
+#define PCRF_AZ_AERCAPHDR_VER_WIDTH 4
+#define PCRF_AZ_AERCAPHDR_ID_LBN 0
+#define PCRF_AZ_AERCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_AER_UNCORR_ERR_STAT_REG(32bit):
+ * AER Uncorrectable error status register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_STAT_REG 0x00000104
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_STAT_LBN 19
+#define PCRF_AZ_ECRC_ERR_STAT_WIDTH 1
+#define PCRF_AZ_MALF_TLP_STAT_LBN 18
+#define PCRF_AZ_MALF_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_OVF_STAT_LBN 17
+#define PCRF_AZ_RX_OVF_STAT_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_STAT_LBN 16
+#define PCRF_AZ_UNEXP_COMP_STAT_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_STAT_LBN 15
+#define PCRF_AZ_COMP_ABRT_STAT_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_STAT_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_STAT_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_STAT_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AZ_PSON_TLP_STAT_LBN 12
+#define PCRF_AZ_PSON_TLP_STAT_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_STAT_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_STAT_LBN 0
+#define PCRF_AB_TRAIN_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_MASK_REG(32bit):
+ * AER Uncorrectable error mask register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_MASK_REG 0x00000108
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_LBN 24
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_WIDTH 1
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_LBN 22
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_WIDTH 1
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_MASK_LBN 19
+#define PCRF_AZ_ECRC_ERR_MASK_WIDTH 1
+#define PCRF_AZ_MALF_TLP_MASK_LBN 18
+#define PCRF_AZ_MALF_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_OVF_MASK_LBN 17
+#define PCRF_AZ_RX_OVF_MASK_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_MASK_LBN 16
+#define PCRF_AZ_UNEXP_COMP_MASK_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_MASK_LBN 15
+#define PCRF_AZ_COMP_ABRT_MASK_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_MASK_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_MASK_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_MASK_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AZ_PSON_TLP_MASK_LBN 12
+#define PCRF_AZ_PSON_TLP_MASK_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_MASK_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_MASK_LBN 0
+#define PCRF_AB_TRAIN_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_SEV_REG(32bit):
+ * AER Uncorrectable error severity register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_SEV_REG 0x0000010c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_SEV_LBN 19
+#define PCRF_AZ_ECRC_ERR_SEV_WIDTH 1
+#define PCRF_AZ_MALF_TLP_SEV_LBN 18
+#define PCRF_AZ_MALF_TLP_SEV_WIDTH 1
+#define PCRF_AZ_RX_OVF_SEV_LBN 17
+#define PCRF_AZ_RX_OVF_SEV_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_SEV_LBN 16
+#define PCRF_AZ_UNEXP_COMP_SEV_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_SEV_LBN 15
+#define PCRF_AZ_COMP_ABRT_SEV_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_SEV_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_SEV_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_SEV_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AZ_PSON_TLP_SEV_LBN 12
+#define PCRF_AZ_PSON_TLP_SEV_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_SEV_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_SEV_LBN 0
+#define PCRF_AB_TRAIN_ERR_SEV_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_STAT_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_STAT_REG 0x00000110
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_STAT_LBN 7
+#define PCRF_AZ_BAD_DLLP_STAT_WIDTH 1
+#define PCRF_AZ_BAD_TLP_STAT_LBN 6
+#define PCRF_AZ_BAD_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_ERR_STAT_LBN 0
+#define PCRF_AZ_RX_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_MASK_REG(32bit):
+ * AER Correctable error mask register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_MASK_REG 0x00000114
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_MASK_LBN 7
+#define PCRF_AZ_BAD_DLLP_MASK_WIDTH 1
+#define PCRF_AZ_BAD_TLP_MASK_LBN 6
+#define PCRF_AZ_BAD_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_ERR_MASK_LBN 0
+#define PCRF_AZ_RX_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_CAP_CTL_REG(32bit):
+ * AER capability and control register
+ */
+
+#define PCR_AZ_AER_CAP_CTL_REG 0x00000118
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_ECRC_CHK_EN_LBN 8
+#define PCRF_AZ_ECRC_CHK_EN_WIDTH 1
+#define PCRF_AZ_ECRC_CHK_CAP_LBN 7
+#define PCRF_AZ_ECRC_CHK_CAP_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_EN_LBN 6
+#define PCRF_AZ_ECRC_GEN_EN_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_CAP_LBN 5
+#define PCRF_AZ_ECRC_GEN_CAP_WIDTH 1
+#define PCRF_AZ_1ST_ERR_PTR_LBN 0
+#define PCRF_AZ_1ST_ERR_PTR_WIDTH 5
+
+
+/*
+ * PC_AER_HDR_LOG_REG(128bit):
+ * AER Header log register
+ */
+
+#define PCR_AZ_AER_HDR_LOG_REG 0x0000011c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_HDR_LOG_LBN 0
+#define PCRF_AZ_HDR_LOG_WIDTH 128
+
+
+/*
+ * PC_DEVSN_CAP_HDR_REG(32bit):
+ * Device serial number capability header register
+ */
+
+#define PCR_CZ_DEVSN_CAP_HDR_REG 0x00000140
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_DEVSNCAPHDR_VER_LBN 16
+#define PCRF_CZ_DEVSNCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_DEVSNCAPHDR_ID_LBN 0
+#define PCRF_CZ_DEVSNCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_DEVSN_DWORD0_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_CZ_DEVSN_DWORD0_REG 0x00000144
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD0_LBN 0
+#define PCRF_CZ_DEVSN_DWORD0_WIDTH 32
+
+
+/*
+ * PC_DEVSN_DWORD1_REG(32bit):
+ * Device serial number DWORD1
+ */
+
+#define PCR_CZ_DEVSN_DWORD1_REG 0x00000148
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD1_LBN 0
+#define PCRF_CZ_DEVSN_DWORD1_WIDTH 32
+
+
+/*
+ * PC_ARI_CAP_HDR_REG(32bit):
+ * ARI capability header register
+ */
+
+#define PCR_CZ_ARI_CAP_HDR_REG 0x00000150
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_ARICAPHDR_VER_LBN 16
+#define PCRF_CZ_ARICAPHDR_VER_WIDTH 4
+#define PCRF_CZ_ARICAPHDR_ID_LBN 0
+#define PCRF_CZ_ARICAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_ARI_CAP_REG(16bit):
+ * ARI Capabilities
+ */
+
+#define PCR_CZ_ARI_CAP_REG 0x00000154
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_NXT_FN_NUM_LBN 8
+#define PCRF_CZ_ARI_NXT_FN_NUM_WIDTH 8
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_WIDTH 1
+
+
+/*
+ * PC_ARI_CTL_REG(16bit):
+ * ARI Control
+ */
+
+#define PCR_CZ_ARI_CTL_REG 0x00000156
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_FN_GRP_LBN 4
+#define PCRF_CZ_ARI_FN_GRP_WIDTH 3
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_WIDTH 1
+
+
+/*
+ * PC_SEC_PCIE_CAP_REG(32bit):
+ * Secondary PCIE Capability Register
+ */
+
+#define PCR_DZ_SEC_PCIE_CAP_REG 0x00000160
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_SEC_NXT_PTR_LBN 20
+#define PCRF_DZ_SEC_NXT_PTR_WIDTH 12
+#define PCRF_DZ_SEC_VERSION_LBN 16
+#define PCRF_DZ_SEC_VERSION_WIDTH 4
+#define PCRF_DZ_SEC_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_SEC_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_HDR_REG(32bit):
+ * SRIOV capability header register
+ */
+
+#define PCR_CC_SRIOV_CAP_HDR_REG 0x00000160
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_HDR_REG 0x00000180
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_SRIOVCAPHDR_VER_LBN 16
+#define PCRF_CZ_SRIOVCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_SRIOVCAPHDR_ID_LBN 0
+#define PCRF_CZ_SRIOVCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_REG(32bit):
+ * SRIOV Capabilities
+ */
+
+#define PCR_CC_SRIOV_CAP_REG 0x00000164
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_REG 0x00000184
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_LBN 21
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_WIDTH 11
+#define PCRF_DZ_VF_ARI_CAP_PRESV_LBN 1
+#define PCRF_DZ_VF_ARI_CAP_PRESV_WIDTH 1
+#define PCRF_CZ_VF_MIGR_CAP_LBN 0
+#define PCRF_CZ_VF_MIGR_CAP_WIDTH 1
+
+
+/*
+ * PC_LINK_CONTROL3_REG(32bit):
+ * Link Control 3.
+ */
+
+#define PCR_DZ_LINK_CONTROL3_REG 0x00000164
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LINK_EQ_INT_EN_LBN 1
+#define PCRF_DZ_LINK_EQ_INT_EN_WIDTH 1
+#define PCRF_DZ_PERFORM_EQL_LBN 0
+#define PCRF_DZ_PERFORM_EQL_WIDTH 1
+
+
+/*
+ * PC_LANE_ERROR_STAT_REG(32bit):
+ * Lane Error Status Register.
+ */
+
+#define PCR_DZ_LANE_ERROR_STAT_REG 0x00000168
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE_STATUS_LBN 0
+#define PCRF_DZ_LANE_STATUS_WIDTH 8
+
+
+/*
+ * PC_SRIOV_CTL_REG(16bit):
+ * SRIOV Control
+ */
+
+#define PCR_CC_SRIOV_CTL_REG 0x00000168
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CTL_REG 0x00000188
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_LBN 4
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_WIDTH 1
+#define PCRF_CZ_VF_MSE_LBN 3
+#define PCRF_CZ_VF_MSE_WIDTH 1
+#define PCRF_CZ_VF_MIGR_INT_EN_LBN 2
+#define PCRF_CZ_VF_MIGR_INT_EN_WIDTH 1
+#define PCRF_CZ_VF_MIGR_EN_LBN 1
+#define PCRF_CZ_VF_MIGR_EN_WIDTH 1
+#define PCRF_CZ_VF_EN_LBN 0
+#define PCRF_CZ_VF_EN_WIDTH 1
+
+
+/*
+ * PC_SRIOV_STAT_REG(16bit):
+ * SRIOV Status
+ */
+
+#define PCR_CC_SRIOV_STAT_REG 0x0000016a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_STAT_REG 0x0000018a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_STAT_LBN 0
+#define PCRF_CZ_VF_MIGR_STAT_WIDTH 1
+
+
+/*
+ * PC_LANE01_EQU_CONTROL_REG(32bit):
+ * Lanes 0,1 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE01_EQU_CONTROL_REG 0x0000016c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE1_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE1_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE0_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE0_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_INITIALVFS_REG(16bit):
+ * SRIOV Initial VFs
+ */
+
+#define PCR_CC_SRIOV_INITIALVFS_REG 0x0000016c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_INITIALVFS_REG 0x0000018c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_INITIALVFS_LBN 0
+#define PCRF_CZ_VF_INITIALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_TOTALVFS_REG(10bit):
+ * SRIOV Total VFs
+ */
+
+#define PCR_CC_SRIOV_TOTALVFS_REG 0x0000016e
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_TOTALVFS_REG 0x0000018e
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_TOTALVFS_LBN 0
+#define PCRF_CZ_VF_TOTALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_NUMVFS_REG(16bit):
+ * SRIOV Number of VFs
+ */
+
+#define PCR_CC_SRIOV_NUMVFS_REG 0x00000170
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_NUMVFS_REG 0x00000190
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_NUMVFS_LBN 0
+#define PCRF_CZ_VF_NUMVFS_WIDTH 16
+
+
+/*
+ * PC_LANE23_EQU_CONTROL_REG(32bit):
+ * Lanes 2,3 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE23_EQU_CONTROL_REG 0x00000170
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE3_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE3_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE2_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE2_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_FN_DPND_LNK_REG(16bit):
+ * SRIOV Function dependency link
+ */
+
+#define PCR_CC_SRIOV_FN_DPND_LNK_REG 0x00000172
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_FN_DPND_LNK_REG 0x00000192
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_LBN 0
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_WIDTH 8
+
+
+/*
+ * PC_SRIOV_1STVF_OFFSET_REG(16bit):
+ * SRIOV First VF Offset
+ */
+
+#define PCR_CC_SRIOV_1STVF_OFFSET_REG 0x00000174
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_1STVF_OFFSET_REG 0x00000194
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_1STVF_OFFSET_LBN 0
+#define PCRF_CZ_VF_1STVF_OFFSET_WIDTH 16
+
+
+/*
+ * PC_LANE45_EQU_CONTROL_REG(32bit):
+ * Lanes 4,5 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE45_EQU_CONTROL_REG 0x00000174
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE5_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE5_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE4_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE4_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_VFSTRIDE_REG(16bit):
+ * SRIOV VF Stride
+ */
+
+#define PCR_CC_SRIOV_VFSTRIDE_REG 0x00000176
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_VFSTRIDE_REG 0x00000196
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_VFSTRIDE_LBN 0
+#define PCRF_CZ_VF_VFSTRIDE_WIDTH 16
+
+
+/*
+ * PC_LANE67_EQU_CONTROL_REG(32bit):
+ * Lanes 6,7 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE67_EQU_CONTROL_REG 0x00000178
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE7_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE7_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE6_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE6_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_DEVID_REG(16bit):
+ * SRIOV VF Device ID
+ */
+
+#define PCR_CC_SRIOV_DEVID_REG 0x0000017a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_DEVID_REG 0x0000019a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_DEVID_LBN 0
+#define PCRF_CZ_VF_DEVID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SUP_PAGESZ_REG(16bit):
+ * SRIOV Supported Page Sizes
+ */
+
+#define PCR_CC_SRIOV_SUP_PAGESZ_REG 0x0000017c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SUP_PAGESZ_REG 0x0000019c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SUP_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SUP_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SYS_PAGESZ_REG(32bit):
+ * SRIOV System Page Size
+ */
+
+#define PCR_CC_SRIOV_SYS_PAGESZ_REG 0x00000180
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SYS_PAGESZ_REG 0x000001a0
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SYS_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SYS_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_BAR0_REG(32bit):
+ * SRIOV VF Bar0
+ */
+
+#define PCR_CC_SRIOV_BAR0_REG 0x00000184
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR0_REG 0x000001a4
+/* hunta0=pci_f0_config */
+
+#define PCRF_CC_VF_BAR_ADDRESS_LBN 0
+#define PCRF_CC_VF_BAR_ADDRESS_WIDTH 32
+#define PCRF_DZ_VF_BAR0_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR0_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR0_PREF_LBN 3
+#define PCRF_DZ_VF_BAR0_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR0_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR0_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR0_IOM_LBN 0
+#define PCRF_DZ_VF_BAR0_IOM_WIDTH 1
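+/*
+ * Note: as elsewhere in this header, each field is described by a lowest
+ * bit number (_LBN) and a bit width (_WIDTH).  For example (illustrative
+ * only), the VF BAR0 base-address bits could be extracted from a 32-bit
+ * register value as:
+ *
+ *	(bar0 >> PCRF_DZ_VF_BAR0_ADDRESS_LBN) &
+ *	    ((1u << PCRF_DZ_VF_BAR0_ADDRESS_WIDTH) - 1)
+ */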
+
+
+/*
+ * PC_SRIOV_BAR1_REG(32bit):
+ * SRIOV Bar1
+ */
+
+#define PCR_CC_SRIOV_BAR1_REG 0x00000188
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR1_REG 0x000001a8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR1_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR1_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR2_REG(32bit):
+ * SRIOV Bar2
+ */
+
+#define PCR_CC_SRIOV_BAR2_REG 0x0000018c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR2_REG 0x000001ac
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR2_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR2_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR2_PREF_LBN 3
+#define PCRF_DZ_VF_BAR2_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR2_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR2_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR2_IOM_LBN 0
+#define PCRF_DZ_VF_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR3_REG(32bit):
+ * SRIOV Bar3
+ */
+
+#define PCR_CC_SRIOV_BAR3_REG 0x00000190
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR3_REG 0x000001b0
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR3_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR3_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR4_REG(32bit):
+ * SRIOV Bar4
+ */
+
+#define PCR_CC_SRIOV_BAR4_REG 0x00000194
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR4_REG 0x000001b4
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR4_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR4_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR5_REG(32bit):
+ * SRIOV Bar5
+ */
+
+#define PCR_CC_SRIOV_BAR5_REG 0x00000198
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR5_REG 0x000001b8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR5_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR5_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_RSVD_REG(16bit):
+ * Reserved register
+ */
+
+#define PCR_DZ_SRIOV_RSVD_REG 0x00000198
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_VF_RSVD_LBN 0
+#define PCRF_DZ_VF_RSVD_WIDTH 16
+
+
+/*
+ * PC_SRIOV_MIBR_SARRAY_OFFSET_REG(32bit):
+ * SRIOV VF Migration State Array Offset
+ */
+
+#define PCR_CC_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000019c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_MIBR_SARRAY_OFFSET_REG 0x000001bc
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_OFFSET_LBN 3
+#define PCRF_CZ_VF_MIGR_OFFSET_WIDTH 29
+#define PCRF_CZ_VF_MIGR_BIR_LBN 0
+#define PCRF_CZ_VF_MIGR_BIR_WIDTH 3
+
+
+/*
+ * PC_TPH_CAP_HDR_REG(32bit):
+ * TPH Capability Header Register
+ */
+
+#define PCR_DZ_TPH_CAP_HDR_REG 0x000001c0
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_NXT_PTR_LBN 20
+#define PCRF_DZ_TPH_NXT_PTR_WIDTH 12
+#define PCRF_DZ_TPH_VERSION_LBN 16
+#define PCRF_DZ_TPH_VERSION_WIDTH 4
+#define PCRF_DZ_TPH_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_TPH_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_TPH_REQ_CAP_REG(32bit):
+ * TPH Requester Capability Register
+ */
+
+#define PCR_DZ_TPH_REQ_CAP_REG 0x000001c4
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_ST_TBLE_SIZE_LBN 16
+#define PCRF_DZ_ST_TBLE_SIZE_WIDTH 11
+#define PCRF_DZ_ST_TBLE_LOC_LBN 9
+#define PCRF_DZ_ST_TBLE_LOC_WIDTH 2
+#define PCRF_DZ_EXT_TPH_MODE_SUP_LBN 8
+#define PCRF_DZ_EXT_TPH_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_DEV_MODE_SUP_LBN 2
+#define PCRF_DZ_TPH_DEV_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_LBN 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_NOST_MODE_SUP_LBN 0
+#define PCRF_DZ_TPH_NOST_MODE_SUP_WIDTH 1
+
+
+/*
+ * PC_TPH_REQ_CTL_REG(32bit):
+ * TPH Requester Control Register
+ */
+
+#define PCR_DZ_TPH_REQ_CTL_REG 0x000001c8
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_REQ_ENABLE_LBN 8
+#define PCRF_DZ_TPH_REQ_ENABLE_WIDTH 2
+#define PCRF_DZ_TPH_ST_MODE_LBN 0
+#define PCRF_DZ_TPH_ST_MODE_WIDTH 3
+
+
+/*
+ * PC_LTR_CAP_HDR_REG(32bit):
+ * Latency Tolerance Reporting Cap Header Reg
+ */
+
+#define PCR_DZ_LTR_CAP_HDR_REG 0x00000290
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_NXT_PTR_LBN 20
+#define PCRF_DZ_LTR_NXT_PTR_WIDTH 12
+#define PCRF_DZ_LTR_VERSION_LBN 16
+#define PCRF_DZ_LTR_VERSION_WIDTH 4
+#define PCRF_DZ_LTR_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_LTR_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_LTR_MAX_SNOOP_REG(32bit):
+ * LTR Maximum Snoop/No Snoop Register
+ */
+
+#define PCR_DZ_LTR_MAX_SNOOP_REG 0x00000294
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_LBN 26
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_LBN 16
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_WIDTH 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_LBN 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN 0
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH 10
+
+
+/*
+ * PC_ACK_LAT_TMR_REG(32bit):
+ * ACK latency timer & replay timer register
+ */
+
+#define PCR_AC_ACK_LAT_TMR_REG 0x00000700
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RT_LBN 16
+#define PCRF_AC_RT_WIDTH 16
+#define PCRF_AC_ALT_LBN 0
+#define PCRF_AC_ALT_WIDTH 16
+
+
+/*
+ * PC_OTHER_MSG_REG(32bit):
+ * Other message register
+ */
+
+#define PCR_AC_OTHER_MSG_REG 0x00000704
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_OM_CRPT3_LBN 24
+#define PCRF_AC_OM_CRPT3_WIDTH 8
+#define PCRF_AC_OM_CRPT2_LBN 16
+#define PCRF_AC_OM_CRPT2_WIDTH 8
+#define PCRF_AC_OM_CRPT1_LBN 8
+#define PCRF_AC_OM_CRPT1_WIDTH 8
+#define PCRF_AC_OM_CRPT0_LBN 0
+#define PCRF_AC_OM_CRPT0_WIDTH 8
+
+
+/*
+ * PC_FORCE_LNK_REG(24bit):
+ * Port force link register
+ */
+
+#define PCR_AC_FORCE_LNK_REG 0x00000708
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_LFS_LBN 16
+#define PCRF_AC_LFS_WIDTH 6
+#define PCRF_AC_FL_LBN 15
+#define PCRF_AC_FL_WIDTH 1
+#define PCRF_AC_LN_LBN 0
+#define PCRF_AC_LN_WIDTH 8
+
+
+/*
+ * PC_ACK_FREQ_REG(32bit):
+ * ACK frequency register
+ */
+
+#define PCR_AC_ACK_FREQ_REG 0x0000070c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_LBN 30
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_WIDTH 1
+#define PCRF_AC_L1_ENTR_LAT_LBN 27
+#define PCRF_AC_L1_ENTR_LAT_WIDTH 3
+#define PCRF_AC_L0_ENTR_LAT_LBN 24
+#define PCRF_AC_L0_ENTR_LAT_WIDTH 3
+#define PCRF_CC_COMM_NFTS_LBN 16
+#define PCRF_CC_COMM_NFTS_WIDTH 8
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_LBN 16
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_WIDTH 3
+#define PCRF_AC_MAX_FTS_LBN 8
+#define PCRF_AC_MAX_FTS_WIDTH 8
+#define PCRF_AC_ACK_FREQ_LBN 0
+#define PCRF_AC_ACK_FREQ_WIDTH 8
+
+
+/*
+ * PC_PORT_LNK_CTL_REG(32bit):
+ * Port link control register
+ */
+
+#define PCR_AC_PORT_LNK_CTL_REG 0x00000710
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AB_LRE_LBN 27
+#define PCRF_AB_LRE_WIDTH 1
+#define PCRF_AB_ESYNC_LBN 26
+#define PCRF_AB_ESYNC_WIDTH 1
+#define PCRF_AB_CRPT_LBN 25
+#define PCRF_AB_CRPT_WIDTH 1
+#define PCRF_AB_XB_LBN 24
+#define PCRF_AB_XB_WIDTH 1
+#define PCRF_AC_LC_LBN 16
+#define PCRF_AC_LC_WIDTH 6
+#define PCRF_AC_LDR_LBN 8
+#define PCRF_AC_LDR_WIDTH 4
+#define PCRF_AC_FLM_LBN 7
+#define PCRF_AC_FLM_WIDTH 1
+#define PCRF_AC_LKD_LBN 6
+#define PCRF_AC_LKD_WIDTH 1
+#define PCRF_AC_DLE_LBN 5
+#define PCRF_AC_DLE_WIDTH 1
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_LBN 4
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_AC_RA_LBN 3
+#define PCRF_AC_RA_WIDTH 1
+#define PCRF_AC_LE_LBN 2
+#define PCRF_AC_LE_WIDTH 1
+#define PCRF_AC_SD_LBN 1
+#define PCRF_AC_SD_WIDTH 1
+#define PCRF_AC_OMR_LBN 0
+#define PCRF_AC_OMR_WIDTH 1
+
+
+/*
+ * PC_LN_SKEW_REG(32bit):
+ * Lane skew register
+ */
+
+#define PCR_AC_LN_SKEW_REG 0x00000714
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_DIS_LBN 31
+#define PCRF_AC_DIS_WIDTH 1
+#define PCRF_AB_RST_LBN 30
+#define PCRF_AB_RST_WIDTH 1
+#define PCRF_AC_AD_LBN 25
+#define PCRF_AC_AD_WIDTH 1
+#define PCRF_AC_FCD_LBN 24
+#define PCRF_AC_FCD_WIDTH 1
+#define PCRF_AC_LS2_LBN 16
+#define PCRF_AC_LS2_WIDTH 8
+#define PCRF_AC_LS1_LBN 8
+#define PCRF_AC_LS1_WIDTH 8
+#define PCRF_AC_LS0_LBN 0
+#define PCRF_AC_LS0_WIDTH 8
+
+
+/*
+ * PC_SYM_NUM_REG(16bit):
+ * Symbol number register
+ */
+
+#define PCR_AC_SYM_NUM_REG 0x00000718
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_MAX_FUNCTIONS_LBN 29
+#define PCRF_CC_MAX_FUNCTIONS_WIDTH 3
+#define PCRF_CC_FC_WATCHDOG_TMR_LBN 24
+#define PCRF_CC_FC_WATCHDOG_TMR_WIDTH 5
+#define PCRF_CC_ACK_NAK_TMR_MOD_LBN 19
+#define PCRF_CC_ACK_NAK_TMR_MOD_WIDTH 5
+#define PCRF_CC_REPLAY_TMR_MOD_LBN 14
+#define PCRF_CC_REPLAY_TMR_MOD_WIDTH 5
+#define PCRF_AB_ES_LBN 12
+#define PCRF_AB_ES_WIDTH 3
+#define PCRF_AB_SYM_NUM_REG_RSVD0_LBN 11
+#define PCRF_AB_SYM_NUM_REG_RSVD0_WIDTH 1
+#define PCRF_CC_NUM_SKP_SYMS_LBN 8
+#define PCRF_CC_NUM_SKP_SYMS_WIDTH 3
+#define PCRF_AB_TS2_LBN 4
+#define PCRF_AB_TS2_WIDTH 4
+#define PCRF_AC_TS1_LBN 0
+#define PCRF_AC_TS1_WIDTH 4
+
+
+/*
+ * PC_SYM_TMR_FLT_MSK_REG(16bit):
+ * Symbol timer and Filter Mask Register
+ */
+
+#define PCR_CC_SYM_TMR_FLT_MSK_REG 0x0000071c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK1_LBN 16
+#define PCRF_CC_DEFAULT_FLT_MSK1_WIDTH 16
+#define PCRF_CC_FC_WDOG_TMR_DIS_LBN 15
+#define PCRF_CC_FC_WDOG_TMR_DIS_WIDTH 1
+#define PCRF_CC_SI1_LBN 8
+#define PCRF_CC_SI1_WIDTH 3
+#define PCRF_CC_SKIP_INT_VAL_LBN 0
+#define PCRF_CC_SKIP_INT_VAL_WIDTH 11
+#define PCRF_CC_SI0_LBN 0
+#define PCRF_CC_SI0_WIDTH 8
+
+
+/*
+ * PC_SYM_TMR_REG(16bit):
+ * Symbol timer register
+ */
+
+#define PCR_AB_SYM_TMR_REG 0x0000071c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_ET_LBN 11
+#define PCRF_AB_ET_WIDTH 4
+#define PCRF_AB_SI1_LBN 8
+#define PCRF_AB_SI1_WIDTH 3
+#define PCRF_AB_SI0_LBN 0
+#define PCRF_AB_SI0_WIDTH 8
+
+
+/*
+ * PC_FLT_MSK_REG(32bit):
+ * Filter Mask Register 2
+ */
+
+#define PCR_CC_FLT_MSK_REG 0x00000720
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK2_LBN 0
+#define PCRF_CC_DEFAULT_FLT_MSK2_WIDTH 32
+
+
+/*
+ * PC_PHY_STAT_REG(32bit):
+ * PHY status register
+ */
+
+#define PCR_AB_PHY_STAT_REG 0x00000720
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_STAT_REG 0x00000810
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_SSL_LBN 3
+#define PCRF_AC_SSL_WIDTH 1
+#define PCRF_AC_SSR_LBN 2
+#define PCRF_AC_SSR_WIDTH 1
+#define PCRF_AC_SSCL_LBN 1
+#define PCRF_AC_SSCL_WIDTH 1
+#define PCRF_AC_SSCD_LBN 0
+#define PCRF_AC_SSCD_WIDTH 1
+
+
+/*
+ * PC_PHY_CTL_REG(32bit):
+ * PHY control register
+ */
+
+#define PCR_AB_PHY_CTL_REG 0x00000724
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_CTL_REG 0x00000814
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_BD_LBN 31
+#define PCRF_AC_BD_WIDTH 1
+#define PCRF_AC_CDS_LBN 30
+#define PCRF_AC_CDS_WIDTH 1
+#define PCRF_AC_DWRAP_LB_LBN 29
+#define PCRF_AC_DWRAP_LB_WIDTH 1
+#define PCRF_AC_EBD_LBN 28
+#define PCRF_AC_EBD_WIDTH 1
+#define PCRF_AC_SNR_LBN 27
+#define PCRF_AC_SNR_WIDTH 1
+#define PCRF_AC_RX_NOT_DET_LBN 2
+#define PCRF_AC_RX_NOT_DET_WIDTH 1
+#define PCRF_AC_FORCE_LOS_VAL_LBN 1
+#define PCRF_AC_FORCE_LOS_VAL_WIDTH 1
+#define PCRF_AC_FORCE_LOS_EN_LBN 0
+#define PCRF_AC_FORCE_LOS_EN_WIDTH 1
+
+
+/*
+ * PC_DEBUG0_REG(32bit):
+ * Debug register 0
+ */
+
+#define PCR_AC_DEBUG0_REG 0x00000728
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI03_LBN 24
+#define PCRF_AC_CDI03_WIDTH 8
+#define PCRF_AC_CDI0_LBN 0
+#define PCRF_AC_CDI0_WIDTH 32
+#define PCRF_AC_CDI02_LBN 16
+#define PCRF_AC_CDI02_WIDTH 8
+#define PCRF_AC_CDI01_LBN 8
+#define PCRF_AC_CDI01_WIDTH 8
+#define PCRF_AC_CDI00_LBN 0
+#define PCRF_AC_CDI00_WIDTH 8
+
+
+/*
+ * PC_DEBUG1_REG(32bit):
+ * Debug register 1
+ */
+
+#define PCR_AC_DEBUG1_REG 0x0000072c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI13_LBN 24
+#define PCRF_AC_CDI13_WIDTH 8
+#define PCRF_AC_CDI1_LBN 0
+#define PCRF_AC_CDI1_WIDTH 32
+#define PCRF_AC_CDI12_LBN 16
+#define PCRF_AC_CDI12_WIDTH 8
+#define PCRF_AC_CDI11_LBN 8
+#define PCRF_AC_CDI11_WIDTH 8
+#define PCRF_AC_CDI10_LBN 0
+#define PCRF_AC_CDI10_WIDTH 8
+
+
+/*
+ * PC_XPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XPFCC_STAT_REG
+ */
+
+#define PCR_AC_XPFCC_STAT_REG 0x00000730
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XPDC_LBN 12
+#define PCRF_AC_XPDC_WIDTH 8
+#define PCRF_AC_XPHC_LBN 0
+#define PCRF_AC_XPHC_WIDTH 12
+
+
+/*
+ * PC_XNPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XNPFCC_STAT_REG
+ */
+
+#define PCR_AC_XNPFCC_STAT_REG 0x00000734
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XNPDC_LBN 12
+#define PCRF_AC_XNPDC_WIDTH 8
+#define PCRF_AC_XNPHC_LBN 0
+#define PCRF_AC_XNPHC_WIDTH 12
+
+
+/*
+ * PC_XCFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XCFCC_STAT_REG
+ */
+
+#define PCR_AC_XCFCC_STAT_REG 0x00000738
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XCDC_LBN 12
+#define PCRF_AC_XCDC_WIDTH 8
+#define PCRF_AC_XCHC_LBN 0
+#define PCRF_AC_XCHC_WIDTH 12
+
+
+/*
+ * PC_Q_STAT_REG(8bit):
+ * documentation to be written for sum_PC_Q_STAT_REG
+ */
+
+#define PCR_AC_Q_STAT_REG 0x0000073c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RQNE_LBN 2
+#define PCRF_AC_RQNE_WIDTH 1
+#define PCRF_AC_XRNE_LBN 1
+#define PCRF_AC_XRNE_WIDTH 1
+#define PCRF_AC_RCNR_LBN 0
+#define PCRF_AC_RCNR_WIDTH 1
+
+
+/*
+ * PC_VC_XMIT_ARB1_REG(32bit):
+ * VC Transmit Arbitration Register 1
+ */
+
+#define PCR_CC_VC_XMIT_ARB1_REG 0x00000740
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC_XMIT_ARB2_REG(32bit):
+ * VC Transmit Arbitration Register 2
+ */
+
+#define PCR_CC_VC_XMIT_ARB2_REG 0x00000744
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_P_RQ_CTL_REG(32bit):
+ * VC0 Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_P_RQ_CTL_REG 0x00000748
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_NP_RQ_CTL_REG(32bit):
+ * VC0 Non-Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_NP_RQ_CTL_REG 0x0000074c
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_C_RQ_CTL_REG(32bit):
+ * VC0 Completion Receive Queue Control
+ */
+
+#define PCR_CC_VC0_C_RQ_CTL_REG 0x00000750
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_GEN2_REG(32bit):
+ * Gen2 Register
+ */
+
+#define PCR_CC_GEN2_REG 0x0000080c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_SET_DE_EMPHASIS_LBN 20
+#define PCRF_CC_SET_DE_EMPHASIS_WIDTH 1
+#define PCRF_CC_CFG_TX_COMPLIANCE_LBN 19
+#define PCRF_CC_CFG_TX_COMPLIANCE_WIDTH 1
+#define PCRF_CC_CFG_TX_SWING_LBN 18
+#define PCRF_CC_CFG_TX_SWING_WIDTH 1
+#define PCRF_CC_DIR_SPEED_CHANGE_LBN 17
+#define PCRF_CC_DIR_SPEED_CHANGE_WIDTH 1
+#define PCRF_CC_LANE_ENABLE_LBN 8
+#define PCRF_CC_LANE_ENABLE_WIDTH 9
+#define PCRF_CC_NUM_FTS_LBN 0
+#define PCRF_CC_NUM_FTS_WIDTH 8
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_REGS_PCI_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_rx.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_rx.c
new file mode 100644
index 00000000..c8156341
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_rx.c
@@ -0,0 +1,1315 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+static __checkReturn uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp);
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_rx_ops_t __efx_rx_siena_ops = {
+ siena_rx_init, /* erxo_init */
+ siena_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ siena_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ siena_rx_scale_mode_set, /* erxo_scale_mode_set */
+ siena_rx_scale_key_set, /* erxo_scale_key_set */
+ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ siena_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ siena_rx_qpost, /* erxo_qpost */
+ siena_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ siena_rx_qps_update_credits, /* erxo_qps_update_credits */
+ siena_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ siena_rx_qflush, /* erxo_qflush */
+ siena_rx_qenable, /* erxo_qenable */
+ siena_rx_qcreate, /* erxo_qcreate */
+ siena_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_rx_ops_t __efx_rx_ef10_ops = {
+ ef10_rx_init, /* erxo_init */
+ ef10_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ ef10_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
+ ef10_rx_scale_key_set, /* erxo_scale_key_set */
+ ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ ef10_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ ef10_rx_qpost, /* erxo_qpost */
+ ef10_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ ef10_rx_qps_update_credits, /* erxo_qps_update_credits */
+ ef10_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ ef10_rx_qflush, /* erxo_qflush */
+ ef10_rx_qenable, /* erxo_qenable */
+ ef10_rx_qcreate, /* erxo_qcreate */
+ ef10_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_RX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ erxop = &__efx_rx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ if ((rc = erxop->erxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_erxop = erxop;
+ enp->en_mod_flags |= EFX_MOD_RX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+ return (rc);
+}
+
+ void
+efx_rx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
+
+ erxop->erxo_fini(enp);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to insert RX hash value */
+ *supportp = enp->en_hash_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to support RSS */
+ *supportp = enp->en_rss_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_mode_set != NULL) {
+ if ((rc = erxop->erxo_scale_mode_set(enp, alg,
+ type, insert)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_key_set(enp, key, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_tbl_set(enp, table, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpost(erp, addrp, size, n, completed, added);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+efx_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qps_update_credits(erp);
+}
+
+ __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ return (erxop->erxo_qps_packet_info(erp, buffer,
+ buffer_length, current_offset, lengthp,
+ next_offsetp, timestamp));
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+ void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpush(erp, added, pushedp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ if ((rc = erxop->erxo_qflush(erp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qenable(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rxq_t *erp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ /* Allocate an RXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
+
+ if (erp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ erp->er_magic = EFX_RXQ_MAGIC;
+ erp->er_enp = enp;
+ erp->er_index = index;
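+	/*
+	 * The family-specific qcreate method validates that n is a power of
+	 * two (see siena_rx_qcreate), so n - 1 serves as a ring index mask.
+	 */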
+ erp->er_mask = n - 1;
+ erp->er_esmp = esmp;
+
+ if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id,
+ eep, erp)) != 0)
+ goto fail2;
+
+ enp->en_rx_qcount++;
+ *erpp = erp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qdestroy(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
+ return (erxop->erxo_prefix_hash(enp, func, buffer));
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ unsigned int index;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Zero the RSS table */
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
+ index++) {
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ /* The RSS key and indirection table are writable. */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+
+ /* Hardware can insert RX hash with/without RSS */
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ unsigned int nbuf32;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ nbuf32 = buf_size / 32;
+ if ((nbuf32 == 0) ||
+ (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
+ ((buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rx_qcount > 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+
+ /* Set scatter buffer size */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Enable scatter for packets not matching a filter */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
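+/*
+ * Helper macros used by siena_rx_scale_mode_set() below: they rewrite the
+ * hash-related fields of FR_AZ_RX_CFG_REG (and, for the Toeplitz IPv6 case,
+ * FR_CZ_RX_RSS_IPV6_REG3) to select the LFSR or Toeplitz hash algorithm and
+ * whether the hash is inserted into the receive prefix.
+ */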
+#define EFX_RX_LFSR_HASH(_enp, _insert) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ } \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
+ (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
+ (_tcp) ? 0 : 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ \
+ (_rc) = 0; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+#if EFSYS_OPT_RX_SCALE
+
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ switch (alg) {
+ case EFX_RX_HASHALG_LFSR:
+ EFX_RX_LFSR_HASH(enp, insert);
+ break;
+
+ case EFX_RX_HASHALG_TOEPLITZ:
+ EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
+ type & EFX_RX_HASH_IPV4,
+ type & EFX_RX_HASH_TCPIPV4);
+
+ EFX_RX_TOEPLITZ_IPV6_HASH(enp,
+ type & EFX_RX_HASH_IPV6,
+ type & EFX_RX_HASH_TCPIPV6,
+ rc);
+ if (rc != 0)
+ goto fail1;
+
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFX_RX_LFSR_HASH(enp, B_FALSE);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ unsigned int byte;
+ unsigned int offset;
+ efx_rc_t rc;
+
+ byte = 0;
+
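+	/*
+	 * The loops below walk each key register from its highest byte
+	 * offset downwards, so key[0] lands at the top of the key field.
+	 */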
+ /* Write Toeplitz IPv4 hash key */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv4 hash key */
+ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+
+ if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
+ goto done;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+
+ /* Write Toeplitz IPv6 hash key 2 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+
+ /* Write Toeplitz IPv6 hash key 1 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 2 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 1 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ int index;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
+ EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
+
+ if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
+ uint32_t byte;
+
+ /* Calculate the entry to place in the table */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ EFSYS_PROBE2(table, int, index, uint32_t, byte);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
+
+ /* Write the table */
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+ for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
+ uint32_t byte;
+
+		/* Recompute the expected entry for verification */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ /* Read the table */
+ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+
+ /* Verify the entry */
+ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+/*
+ * Falcon/Siena pseudo-header
+ * --------------------------
+ *
+ * Receive packets are prefixed by an optional 16-byte pseudo-header.
+ * The pseudo-header is a byte array of one of the forms:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL
+ *
+ * where:
+ * TT.TT.TT.TT Toeplitz hash (32-bit big-endian)
+ * LL.LL LFSR hash (16-bit big-endian)
+ */
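+
+/*
+ * Illustrative only: a client that enabled hash insertion via
+ * efx_rx_scale_mode_set(..., insert = B_TRUE) could read the hash back out
+ * of the prefix with, e.g.,
+ *
+ *	uint32_t hash = efx_pseudo_hdr_hash_get(erp, EFX_RX_HASHALG_TOEPLITZ,
+ *	    prefix);
+ *
+ * where 'prefix' points at the start of the 16-byte pseudo-header.
+ */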
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return ((buffer[12] << 24) |
+ (buffer[13] << 16) |
+ (buffer[14] << 8) |
+ buffer[15]);
+
+ case EFX_RX_HASHALG_LFSR:
+ return ((buffer[14] << 8) | buffer[15]);
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp, buffer, lengthp))
+
+ /* Not supported by Falcon/Siena hardware */
+ EFSYS_ASSERT(0);
+ return (ENOTSUP);
+}
+
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
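+		/*
+		 * Build the descriptor: buffer size plus the DMA address
+		 * split across two 32-bit dwords.
+		 */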
+ EFX_POPULATE_QWORD_3(qword,
+ FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
+ FSF_AZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ FSF_AZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_oword_t oword;
+ efx_dword_t dword;
+
+ /* All descriptors are pushed */
+ *pushedp = added;
+
+ /* Push the populated descriptors out */
+ wptr = added & erp->er_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+}
+
+static uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+
+ return (NULL);
+}
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ label = erp->er_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ boolean_t jumbo;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
+ (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
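+	/*
+	 * Encode the ring size for FRF_AZ_RX_DESCQ_SIZE as
+	 * log2(n / EFX_RXQ_MINNDESCS).
+	 */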
+ for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ jumbo = B_FALSE;
+ break;
+
+#if EFSYS_OPT_RX_SCATTER
+ case EFX_RXQ_TYPE_SCATTER:
+ if (enp->en_family < EFX_FAMILY_SIENA) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ jumbo = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ default:
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the new descriptor queue */
+ EFX_POPULATE_OWORD_7(oword,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, size,
+ FRF_AZ_RX_DESCQ_TYPE, 0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ /* Free the RXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_sram.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_sram.c
new file mode 100644
index 00000000..5f4edea7
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_sram.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n)
+{
+ efx_qword_t qword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+ efsys_dma_addr_t addr;
+ efx_oword_t oword;
+ unsigned int count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return (0);
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ if (stop >= EFX_BUF_TBL_SIZE) {
+ rc = EFBIG;
+ goto fail1;
+ }
+
+ /* Add the entries into the buffer table */
+ addr = EFSYS_MEM_ADDR(esmp);
+ for (id = start; id != stop; id++) {
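+		/*
+		 * Each entry holds a 4KiB-aligned buffer address (hence
+		 * addr >> 12), split across the two address dwords.
+		 */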
+ EFX_POPULATE_QWORD_5(qword,
+ FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF_DW0,
+ (uint32_t)((addr >> 12) & 0xffffffff),
+ FRF_AZ_BUF_ADR_FBUF_DW1,
+ (uint32_t)((addr >> 12) >> 32),
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL,
+ id, &qword);
+
+ addr += EFX_BUF_SIZE;
+ }
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ /* Flush the write buffer */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1,
+ FRF_AZ_BUF_CLR_CMD, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+ /* Poll for the last entry being written to the buffer table */
+ EFSYS_ASSERT3U(id, ==, stop);
+ addr -= EFX_BUF_SIZE;
+
+ count = 0;
+ do {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Spin for 1 ms */
+ EFSYS_SPIN(1000);
+
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) ==
+ (uint32_t)((addr >> 12) & 0xffffffff) &&
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) ==
+ (uint32_t)((addr >> 12) >> 32))
+ goto verify;
+
+ } while (++count < 100);
+
+ rc = ETIMEDOUT;
+ goto fail2;
+
+verify:
+ /* Verify the rest of the entries in the buffer table */
+ while (--id != start) {
+ addr -= EFX_BUF_SIZE;
+
+ /* Read the buffer table entry */
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) !=
+ (uint32_t)((addr >> 12) & 0xffffffff) ||
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) !=
+ (uint32_t)((addr >> 12) >> 32)) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ id = stop;
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return;
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+}
+
+
+#if EFSYS_OPT_DIAG
+
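+/*
+ * SRAM test pattern generators used by efx_sram_test(): each one fills a
+ * single row-sized efx_qword_t, selected by 'row', with a deterministic
+ * pattern; 'negate' selects an inverted variant of the pattern where
+ * applicable.
+ */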
+static void
+efx_sram_byte_increment_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ _NOTE(ARGUNUSED(negate))
+
+ for (index = 0; index < sizeof (efx_qword_t); index++)
+ eqp->eq_u8[index] = offset + index;
+}
+
+static void
+efx_sram_all_the_same_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ if (negate)
+ EFX_SET_QWORD(*eqp);
+ else
+ EFX_ZERO_QWORD(*eqp);
+}
+
+static void
+efx_sram_bit_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa,
+ EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa);
+}
+
+static void
+efx_sram_byte_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00,
+ EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00);
+}
+
+static void
+efx_sram_byte_changing_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ for (index = 0; index < sizeof (efx_qword_t); index++) {
+ uint8_t byte;
+
+ if (offset / 256 == 0)
+ byte = (uint8_t)((offset % 257) % 256);
+ else
+ byte = (uint8_t)(~((offset - 8) % 257) % 256);
+
+ eqp->eq_u8[index] = (negate) ? ~byte : byte;
+ }
+}
+
+static void
+efx_sram_bit_sweep_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+
+ if (negate) {
+ EFX_SET_QWORD(*eqp);
+ EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ } else {
+ EFX_ZERO_QWORD(*eqp);
+ EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ }
+}
+
+efx_sram_pattern_fn_t __efx_sram_pattern_fns[] = {
+ efx_sram_byte_increment_set,
+ efx_sram_all_the_same_set,
+ efx_sram_bit_alternate_set,
+ efx_sram_byte_alternate_set,
+ efx_sram_byte_changing_set,
+ efx_sram_bit_sweep_set
+};
+
+ __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type)
+{
+ efx_sram_pattern_fn_t func;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ /* SRAM testing is only available on Siena. */
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ return (0);
+
+ /* Select pattern generator */
+ EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[type];
+
+ return (siena_sram_test(enp, func));
+}
+
+#endif /* EFSYS_OPT_DIAG */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_tx.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_tx.c
new file mode 100644
index 00000000..ceb29206
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_tx.c
@@ -0,0 +1,1097 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp);
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp);
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_tx_ops_t __efx_tx_siena_ops = {
+ siena_tx_init, /* etxo_init */
+ siena_tx_fini, /* etxo_fini */
+ siena_tx_qcreate, /* etxo_qcreate */
+ siena_tx_qdestroy, /* etxo_qdestroy */
+ siena_tx_qpost, /* etxo_qpost */
+ siena_tx_qpush, /* etxo_qpush */
+ siena_tx_qpace, /* etxo_qpace */
+ siena_tx_qflush, /* etxo_qflush */
+ siena_tx_qenable, /* etxo_qenable */
+ NULL, /* etxo_qpio_enable */
+ NULL, /* etxo_qpio_disable */
+ NULL, /* etxo_qpio_write */
+ NULL, /* etxo_qpio_post */
+ siena_tx_qdesc_post, /* etxo_qdesc_post */
+ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ NULL, /* etxo_qdesc_tso2_create */
+ NULL, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ siena_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+static const efx_tx_ops_t __efx_tx_hunt_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+static const efx_tx_ops_t __efx_tx_medford_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_TX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ etxop = &__efx_tx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ etxop = &__efx_tx_hunt_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ etxop = &__efx_tx_medford_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ if ((rc = etxop->etxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_etxop = etxop;
+ enp->en_mod_flags |= EFX_MOD_TX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+ return (rc);
+}
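
A hedged bring-up sketch showing the ordering these checks enforce (the EV module must be initialised before TX, and TX must not already be initialised). efx_ev_init()/efx_ev_fini() are the event-module counterparts from efx_ev.c; error handling is trimmed to the essentials.

static	__checkReturn	efx_rc_t
example_tx_bringup(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	/* Assumes efx_nic_init() has already run (EFX_MOD_NIC is set) */
	if ((rc = efx_ev_init(enp)) != 0)	/* EV module must come first */
		return (rc);

	if ((rc = efx_tx_init(enp)) != 0) {	/* then the TX module */
		efx_ev_fini(enp);
		return (rc);
	}

	return (0);
}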
+
+ void
+efx_tx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ etxop->etxo_fini(enp);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_txq_t *etp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit);
+
+	/* Allocate a TXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);
+
+ if (etp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ etp->et_magic = EFX_TXQ_MAGIC;
+ etp->et_enp = enp;
+ etp->et_index = index;
+ etp->et_mask = n - 1;
+ etp->et_esmp = esmp;
+
+ /* Initial descriptor index may be modified by etxo_qcreate */
+ *addedp = 0;
+
+ if ((rc = etxop->etxo_qcreate(enp, index, label, esmp,
+ n, id, flags, eep, etp, addedp)) != 0)
+ goto fail2;
+
+ enp->en_tx_qcount++;
+ *etpp = etp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_tx_qcount != 0);
+ --enp->en_tx_qcount;
+
+ etxop->etxo_qdestroy(etp);
+
+ /* Free the TXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+}
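
A hedged create/teardown sketch for the two calls above. MY_TXQ_NDESCS and MY_BUFTBL_ID are illustrative placeholders, and my_esmp must already describe DMA-able descriptor ring memory for that many entries; the checksum flags are the same ones consumed by the qcreate methods below.

#define	MY_TXQ_NDESCS	512	/* placeholder; must be a power of two */
#define	MY_BUFTBL_ID	0	/* placeholder buffer table base ID */

static	__checkReturn	efx_rc_t
example_txq_setup(
	__in		efx_nic_t *enp,
	__in		efx_evq_t *eep,
	__in		efsys_mem_t *my_esmp,
	__deref_out	efx_txq_t **etpp)
{
	unsigned int added;
	efx_rc_t rc;

	rc = efx_tx_qcreate(enp, 0, 0, my_esmp, MY_TXQ_NDESCS, MY_BUFTBL_ID,
	    EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP, eep, etpp, &added);
	if (rc != 0)
		return (rc);

	/* ... post descriptors, push the doorbell, wait for flush ... */

	efx_tx_qdestroy(*etpp);
	return (0);
}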
+
+ __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpost(etp, eb,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qpush(etp, added, pushed);
+}
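
A hedged sketch of the post-then-push sequence. The efx_buffer_t fields match their use in siena_tx_qpost() further down; completion tracking ("completed") is the caller's responsibility via TX events.

static	__checkReturn	efx_rc_t
example_tx_send_one(
	__in		efx_txq_t *etp,
	__in		efsys_dma_addr_t addr,
	__in		size_t len,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	efx_buffer_t eb;
	efx_rc_t rc;

	eb.eb_addr = addr;
	eb.eb_size = len;
	eb.eb_eop = B_TRUE;		/* single-fragment packet */

	if ((rc = efx_tx_qpost(etp, &eb, 1, completed, addedp)) != 0)
		return (rc);		/* e.g. ENOSPC when the ring is full */

	/* Ring the doorbell for everything added so far */
	efx_tx_qpush(etp, *addedp, *addedp);
	return (0);
}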
+
+ __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpace(etp, ns)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qflush(etp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qenable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if (etxop->etxo_qpio_enable == NULL) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if ((rc = etxop->etxo_qpio_enable(etp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_disable != NULL)
+ etxop->etxo_qpio_disable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_write != NULL) {
+ if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length,
+ pio_buf_offset)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_post != NULL) {
+ if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed,
+ addedp)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
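
A hedged sketch of the PIO fast path guarded by the NULL method checks above: enable the PIO buffer, copy a small packet into it, then post a PIO descriptor. On families whose ops table has NULL PIO methods (e.g. Siena) the enable step fails with ENOTSUP; the one-shot enable/disable here is a simplification.

static	__checkReturn	efx_rc_t
example_tx_pio_send(
	__in			efx_txq_t *etp,
	__in_ecount(pkt_len)	uint8_t *pkt,
	__in			size_t pkt_len,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	efx_rc_t rc;

	if ((rc = efx_tx_qpio_enable(etp)) != 0)
		return (rc);			/* ENOTSUP without PIO */

	if ((rc = efx_tx_qpio_write(etp, pkt, pkt_len, 0)) != 0)
		goto out;

	rc = efx_tx_qpio_post(etp, pkt_len, completed, addedp);

out:
	efx_tx_qpio_disable(etp);		/* simplified: one-shot use */
	return (rc);
}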
+
+ __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qdesc_post(etp, ed,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL);
+
+ etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp);
+}
+
+ void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL);
+
+ etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp);
+}
+
+ void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
+
+ etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count);
+}
+
+ void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL);
+
+ etxop->etxo_qdesc_vlantci_create(etp, tci, edp);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qstats_update(etp, stat);
+}
+#endif
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Disable the timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level (although always allow a
+ * minimal trickle).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+
+ /*
+ * Filter all packets less than 14 bytes to avoid parsing
+ * errors.
+ */
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+
+ /*
+ * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ return (0);
+}
+
+#define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \
+ do { \
+ unsigned int id; \
+ size_t offset; \
+ efx_qword_t qword; \
+ \
+ id = (_added)++ & (_etp)->et_mask; \
+ offset = id * sizeof (efx_qword_t); \
+ \
+ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \
+ unsigned int, id, efsys_dma_addr_t, (_addr), \
+ size_t, (_size), boolean_t, (_eop)); \
+ \
+ EFX_POPULATE_QWORD_4(qword, \
+ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW0, \
+ (uint32_t)((_addr) & 0xffffffff), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW1, \
+ (uint32_t)((_addr) >> 32)); \
+ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ int rc = ENOSPC;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ goto fail1;
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t start = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ efsys_dma_addr_t end = start + size;
+
+ /*
+ * Fragments must not span 4k boundaries.
+		 * This is a stricter requirement than the maximum fragment length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(start + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= end);
+
+ EFX_TX_DESC(etp, start, size, ebp->eb_eop, added);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
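
A hedged helper mirroring the boundary assertion above, which a caller could use to validate a fragment before posting it. P2ROUNDUP and enc_tx_dma_desc_boundary are the same names used in siena_tx_qpost(); the boundary is 4 KiB on Siena.

static	boolean_t
example_frag_fits(
	__in	efx_txq_t *etp,
	__in	efsys_dma_addr_t start,
	__in	size_t size)
{
	efsys_dma_addr_t boundary =
	    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary;

	/* B_TRUE if [start, start + size) does not cross a boundary */
	return ((P2ROUNDUP(start + 1, boundary) >= start + size) ?
	    B_TRUE : B_FALSE);
}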
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ uint32_t wptr;
+ efx_dword_t dword;
+ efx_oword_t oword;
+
+ /* Push the populated descriptors out */
+ wptr = added & etp->et_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, pushed & etp->et_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0,
+ etp->et_index, &dword, B_FALSE);
+}
+
+#define EFX_MAX_PACE_VALUE 20
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ unsigned int pace_val;
+ unsigned int timer_period;
+ efx_rc_t rc;
+
+ if (ns == 0) {
+ pace_val = 0;
+ } else {
+ /*
+ * The pace_val to write into the table is s.t
+ * ns <= timer_period * (2 ^ pace_val)
+ */
+ timer_period = 104 / encp->enc_clk_mult;
+ for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) {
+ if ((timer_period << pace_val) >= ns)
+ break;
+ }
+ }
+ if (pace_val > EFX_MAX_PACE_VALUE) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Update the pacing table */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index,
+ &oword, B_TRUE);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
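
Worked example of the pacing search above, assuming enc_clk_mult is 1 so timer_period is 104 ns: for ns = 1000 the loop selects pace_val = 4, since 104 << 3 = 832 < 1000 but 104 << 4 = 1664 >= 1000, i.e. the smallest pace_val satisfying ns <= timer_period * 2^pace_val. Requests that would need pace_val > EFX_MAX_PACE_VALUE (20) are rejected with EINVAL.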
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ efx_tx_qpace(etp, 0);
+
+ label = etp->et_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index,
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0));
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS ==
+ (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
+
+ EFSYS_ASSERT(ISP2(encp->enc_txq_max_ndescs));
+ EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_TXQ_MINNDESCS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_txq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ for (size = 0;
+ (1 << size) <= (int)(encp->enc_txq_max_ndescs / EFX_TXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the new descriptor queue */
+ *addedp = 0;
+
+ EFX_POPULATE_OWORD_6(oword,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, size,
+ FRF_AZ_TX_DESCQ_TYPE, 0);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
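
Worked example of the size encoding above: the loop stores log2(n / EFX_TXQ_MINNDESCS) into FRF_AZ_TX_DESCQ_SIZE. Assuming EFX_TXQ_MINNDESCS is 512 (its value in efx.h), a request for n = 4096 descriptors yields size = 3, and the queue's buffer table allocation must also fit, i.e. id + (1 << size) < enc_buftbl_limit, or the call fails with EINVAL.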
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /*
+ * Fragments must not span 4k boundaries.
+	 * This is a stricter requirement than the maximum fragment length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(addr + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= addr + size);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ FSF_AZ_TX_KER_CONT, eop ? 0 : 1,
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size,
+ FSF_AZ_TX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addr & 0xffffffff),
+ FSF_AZ_TX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addr >> 32));
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 2866874ecd7a363b */
+static const char * const __efx_tx_qstat_name[] = {
+ "post",
+ "post_pio",
+};
+/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */
+
+ const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, TX_NQSTATS);
+
+ return (__efx_tx_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_types.h b/src/seastar/dpdk/drivers/net/sfc/base/efx_types.h
new file mode 100644
index 00000000..b8ee14a6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_types.h
@@ -0,0 +1,1647 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ *
+ * Acknowledgement to Fen Systems Ltd.
+ */
+
+#ifndef _SYS_EFX_TYPES_H
+#define _SYS_EFX_TYPES_H
+
+#include "efsys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Bitfield access
+ *
+ * Solarflare NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t)
+ * to be little-endian.
+ *
+ * In the less common case of using PIO for individual register
+ * writes, we construct the little-endian datatype in host memory and
+ * then use non-swapping register access primitives, rather than
+ * constructing a native-endian datatype and relying on implicit
+ * byte-swapping. (We use a similar strategy for register reads.)
+ */
+
+/*
+ * NOTE: Field definitions here and elsewhere are done in terms of a lowest
+ * bit number (LBN) and a width.
+ */
+
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
+
+#define EFX_BYTE_1_LBN 8
+#define EFX_BYTE_1_WIDTH 8
+
+#define EFX_BYTE_2_LBN 16
+#define EFX_BYTE_2_WIDTH 8
+
+#define EFX_BYTE_3_LBN 24
+#define EFX_BYTE_3_WIDTH 8
+
+#define EFX_BYTE_4_LBN 32
+#define EFX_BYTE_4_WIDTH 8
+
+#define EFX_BYTE_5_LBN 40
+#define EFX_BYTE_5_WIDTH 8
+
+#define EFX_BYTE_6_LBN 48
+#define EFX_BYTE_6_WIDTH 8
+
+#define EFX_BYTE_7_LBN 56
+#define EFX_BYTE_7_WIDTH 8
+
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+
+#define EFX_WORD_2_LBN 32
+#define EFX_WORD_2_WIDTH 16
+
+#define EFX_WORD_3_LBN 48
+#define EFX_WORD_3_WIDTH 16
+
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+
+/* There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions
+ * here as the implementations of EFX_QWORD_FIELD and EFX_OWORD_FIELD do not
+ * support field widths larger than 32 bits.
+ */
+
+/* Specified attribute (i.e. LBN or WIDTH) of the specified field */
+#define EFX_VAL(_field, _attribute) \
+ _field ## _ ## _attribute
+
+/* Lowest bit number of the specified field */
+#define EFX_LOW_BIT(_field) \
+ EFX_VAL(_field, LBN)
+
+/* Width of the specified field */
+#define EFX_WIDTH(_field) \
+ EFX_VAL(_field, WIDTH)
+
+/* Highest bit number of the specified field */
+#define EFX_HIGH_BIT(_field) \
+ (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1)
+
+/*
+ * 64-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x000000000000001f.
+ */
+#define EFX_MASK64(_field) \
+ ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \
+ (((((uint64_t)1) << EFX_WIDTH(_field))) - 1))
+/*
+ * 32-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x0000001f.
+ */
+#define EFX_MASK32(_field) \
+ ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \
+ (((((uint32_t)1) << EFX_WIDTH(_field))) - 1))
+
+/*
+ * 16-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x001f.
+ */
+#define EFX_MASK16(_field) \
+ ((EFX_WIDTH(_field) == 16) ? 0xffffu : \
+ (uint16_t)((1 << EFX_WIDTH(_field)) - 1))
+
+/*
+ * 8-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ */
+#define EFX_MASK8(_field) \
+ ((uint8_t)((1 << EFX_WIDTH(_field)) - 1))
+
+#pragma pack(1)
+
+/*
+ * A byte (i.e. 8-bit) datatype
+ */
+typedef union efx_byte_u {
+ uint8_t eb_u8[1];
+} efx_byte_t;
+
+/*
+ * A word (i.e. 16-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_word_u {
+ efx_byte_t ew_byte[2];
+ uint16_t ew_u16[1];
+ uint8_t ew_u8[2];
+} efx_word_t;
+
+/*
+ * A doubleword (i.e. 32-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_dword_u {
+ efx_byte_t ed_byte[4];
+ efx_word_t ed_word[2];
+ uint32_t ed_u32[1];
+ uint16_t ed_u16[2];
+ uint8_t ed_u8[4];
+} efx_dword_t;
+
+/*
+ * A quadword (i.e. 64-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_qword_u {
+ efx_byte_t eq_byte[8];
+ efx_word_t eq_word[4];
+ efx_dword_t eq_dword[2];
+#if EFSYS_HAS_UINT64
+ uint64_t eq_u64[1];
+#endif
+ uint32_t eq_u32[2];
+ uint16_t eq_u16[4];
+ uint8_t eq_u8[8];
+} efx_qword_t;
+
+/*
+ * An octword (i.e. 128-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_oword_u {
+ efx_byte_t eo_byte[16];
+ efx_word_t eo_word[8];
+ efx_dword_t eo_dword[4];
+ efx_qword_t eo_qword[2];
+#if EFSYS_HAS_SSE2_M128
+ __m128i eo_u128[1];
+#endif
+#if EFSYS_HAS_UINT64
+ uint64_t eo_u64[2];
+#endif
+ uint32_t eo_u32[4];
+ uint16_t eo_u16[8];
+ uint8_t eo_u8[16];
+} efx_oword_t;
+
+#pragma pack()
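
A hedged compile-time sanity-check sketch for the packed layouts above, using the EFX_STATIC_ASSERT helper that the rest of this library uses inside function bodies (see siena_tx_qcreate() in efx_tx.c). It is illustrative only and assumes efx.h is included.

static void
example_check_type_sizes(void)
{
	/* Each packed union is expected to map exactly onto its raw width */
	EFX_STATIC_ASSERT(sizeof (efx_byte_t) == 1);
	EFX_STATIC_ASSERT(sizeof (efx_word_t) == 2);
	EFX_STATIC_ASSERT(sizeof (efx_dword_t) == 4);
	EFX_STATIC_ASSERT(sizeof (efx_qword_t) == 8);
	EFX_STATIC_ASSERT(sizeof (efx_oword_t) == 16);
}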
+
+#define __SWAP16(_x) \
+ ((((_x) & 0xff) << 8) | \
+ (((_x) >> 8) & 0xff))
+
+#define __SWAP32(_x) \
+ ((__SWAP16((_x) & 0xffff) << 16) | \
+ __SWAP16(((_x) >> 16) & 0xffff))
+
+#define __SWAP64(_x) \
+ ((__SWAP32((_x) & 0xffffffff) << 32) | \
+ __SWAP32(((_x) >> 32) & 0xffffffff))
+
+#define __NOSWAP16(_x) (_x)
+#define __NOSWAP32(_x) (_x)
+#define __NOSWAP64(_x) (_x)
+
+#if EFSYS_IS_BIG_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
+
+#define __NATIVE_8(_x) (uint8_t)(_x)
+
+/* Format string for printing an efx_byte_t */
+#define EFX_BYTE_FMT "0x%02x"
+
+/* Format string for printing an efx_word_t */
+#define EFX_WORD_FMT "0x%04x"
+
+/* Format string for printing an efx_dword_t */
+#define EFX_DWORD_FMT "0x%08x"
+
+/* Format string for printing an efx_qword_t */
+#define EFX_QWORD_FMT "0x%08x:%08x"
+
+/* Format string for printing an efx_oword_t */
+#define EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x"
+
+/* Parameters for printing an efx_byte_t */
+#define EFX_BYTE_VAL(_byte) \
+ ((unsigned int)__NATIVE_8((_byte).eb_u8[0]))
+
+/* Parameters for printing an efx_word_t */
+#define EFX_WORD_VAL(_word) \
+ ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0]))
+
+/* Parameters for printing an efx_dword_t */
+#define EFX_DWORD_VAL(_dword) \
+ ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0]))
+
+/* Parameters for printing an efx_qword_t */
+#define EFX_QWORD_VAL(_qword) \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[0]))
+
+/* Parameters for printing an efx_oword_t */
+#define EFX_OWORD_VAL(_oword) \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0]))
+
+/*
+ * Stop lint complaining about some shifts.
+ */
+#ifdef __lint
+extern int fix_lint;
+#define FIX_LINT(_x) (_x + fix_lint)
+#else
+#define FIX_LINT(_x) (_x)
+#endif
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give
+ *
+ * (_element) << 4
+ *
+ * The result will contain the relevant bits filled in over the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \
+ ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ ((_element) >> (_low - _min)) : \
+ ((_element) << (_min - _low))))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 16-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT16(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 8-bit
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT8(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high)
+
+#define EFX_EXTRACT_OWORD64(_oword, _low, _high) \
+ (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_OWORD32(_oword, _low, _high) \
+ (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD64(_qword, _low, _high) \
+ (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD32(_qword, _low, _high) \
+ (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_DWORD(_dword, _low, _high) \
+ (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high))
+
+#define EFX_EXTRACT_WORD(_word, _low, _high) \
+ (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \
+ _low, _high))
+
+#define EFX_EXTRACT_BYTE(_byte, _low, _high) \
+ (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \
+ _low, _high))
+
+
+#define EFX_OWORD_FIELD64(_oword, _field) \
+ ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_OWORD_FIELD32(_oword, _field) \
+ (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD64(_qword, _field) \
+ ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD32(_qword, _field) \
+ (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_DWORD_FIELD(_dword, _field) \
+ (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_WORD_FIELD(_word, _field) \
+ (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK16(_field))
+
+#define EFX_BYTE_FIELD(_byte, _field) \
+ (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK8(_field))
+
+
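
A hedged round-trip sketch: build a little-endian dword with EFX_POPULATE_DWORD_1 (defined further down in this header) and read the value back through EFX_DWORD_FIELD, using the generic EFX_WORD_0 pseudo-field defined above.

static uint32_t
example_field_roundtrip(void)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, EFX_WORD_0, 0x1234);

	return (EFX_DWORD_FIELD(dword, EFX_WORD_0));	/* yields 0x1234 */
}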
+#define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \
+ ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \
+ (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1])
+
+#define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \
+ ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \
+ (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \
+ (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \
+ (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3])
+
+#define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \
+ ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0])
+
+#define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \
+ ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \
+ (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1])
+
+#define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \
+ ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0])
+
+#define EFX_WORD_IS_EQUAL(_word_a, _word_b) \
+ ((_word_a).ew_u16[0] == (_word_b).ew_u16[0])
+
+#define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \
+ ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0])
+
+
+#define EFX_OWORD_IS_ZERO64(_oword) \
+ (((_oword).eo_u64[0] | \
+ (_oword).eo_u64[1]) == 0)
+
+#define EFX_OWORD_IS_ZERO32(_oword) \
+ (((_oword).eo_u32[0] | \
+ (_oword).eo_u32[1] | \
+ (_oword).eo_u32[2] | \
+ (_oword).eo_u32[3]) == 0)
+
+#define EFX_QWORD_IS_ZERO64(_qword) \
+ (((_qword).eq_u64[0]) == 0)
+
+#define EFX_QWORD_IS_ZERO32(_qword) \
+ (((_qword).eq_u32[0] | \
+ (_qword).eq_u32[1]) == 0)
+
+#define EFX_DWORD_IS_ZERO(_dword) \
+ (((_dword).ed_u32[0]) == 0)
+
+#define EFX_WORD_IS_ZERO(_word) \
+ (((_word).ew_u16[0]) == 0)
+
+#define EFX_BYTE_IS_ZERO(_byte) \
+ (((_byte).eb_u8[0]) == 0)
+
+
+#define EFX_OWORD_IS_SET64(_oword) \
+ (((_oword).eo_u64[0] & \
+ (_oword).eo_u64[1]) == ~((uint64_t)0))
+
+#define EFX_OWORD_IS_SET32(_oword) \
+ (((_oword).eo_u32[0] & \
+ (_oword).eo_u32[1] & \
+ (_oword).eo_u32[2] & \
+ (_oword).eo_u32[3]) == ~((uint32_t)0))
+
+#define EFX_QWORD_IS_SET64(_qword) \
+ (((_qword).eq_u64[0]) == ~((uint64_t)0))
+
+#define EFX_QWORD_IS_SET32(_qword) \
+ (((_qword).eq_u32[0] & \
+ (_qword).eq_u32[1]) == ~((uint32_t)0))
+
+#define EFX_DWORD_IS_SET(_dword) \
+ ((_dword).ed_u32[0] == ~((uint32_t)0))
+
+#define EFX_WORD_IS_SET(_word) \
+ ((_word).ew_u16[0] == ~((uint16_t)0))
+
+#define EFX_BYTE_IS_SET(_byte) \
+ ((_byte).eb_u8[0] == ~((uint8_t)0))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+
+#define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint64_t)(_value)) << (_low - _min)) : \
+ (((uint64_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint32_t)(_value)) << (_low - _min)) : \
+ (((uint32_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint16_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint8_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS64(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_64( \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS32(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_32( \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS16(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_16( \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS8(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __NATIVE_8( \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10))
+
+#define EFX_POPULATE_OWORD64(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_OWORD32(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD64(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD32(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_DWORD(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_WORD(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_BYTE(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+
+#define EFX_POPULATE_OWORD_9(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_OWORD_8(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_OWORD_7(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_OWORD_6(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_OWORD_5(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_OWORD_4(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_OWORD_3(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_OWORD_2(_oword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_OWORD_1(_oword, \
+ _field1, _value1) \
+ EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_OWORD(_oword) \
+ EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_OWORD(_oword) \
+ EFX_POPULATE_OWORD_4(_oword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+
+#define EFX_POPULATE_QWORD_9(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_QWORD_8(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_QWORD_7(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_QWORD_6(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_QWORD_5(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_QWORD_4(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_QWORD_3(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_QWORD_2(_qword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_QWORD_1(_qword, \
+ _field1, _value1) \
+ EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_QWORD(_qword) \
+ EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_QWORD(_qword) \
+ EFX_POPULATE_QWORD_2(_qword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+
+#define EFX_POPULATE_DWORD_9(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_DWORD_8(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_DWORD_7(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_DWORD_6(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_DWORD_5(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_DWORD_4(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_DWORD_3(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_DWORD_2(_dword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_DWORD_1(_dword, \
+ _field1, _value1) \
+ EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, \
+ EFX_DWORD_0, 0xffffffff)
+
+/* Populate a word field with various numbers of arguments */
+#define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD
+
+#define EFX_POPULATE_WORD_9(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_WORD_8(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_WORD_7(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_WORD_6(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_WORD_5(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_WORD_4(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_WORD_3(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_WORD_2(_word, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_WORD_1(_word, \
+ _field1, _value1) \
+ EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, \
+ EFX_WORD_0, 0xffff)
+
+/* Populate a byte field with various numbers of arguments */
+#define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE
+
+#define EFX_POPULATE_BYTE_9(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_BYTE_8(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_BYTE_7(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_BYTE_6(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_BYTE_5(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_BYTE_4(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_BYTE_3(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_BYTE_2(_byte, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_BYTE_1(_byte, \
+ _field1, _value1) \
+ EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, \
+ EFX_BYTE_0, 0xff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
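+/*
+ * Illustrative sketch (not part of the definitions below): a field of an
+ * already-populated value can be rewritten while the remaining bits are
+ * preserved, e.g.
+ *
+ *	efx_dword_t dword;
+ *
+ *	EFX_POPULATE_DWORD_2(dword, FIELD_A, 1, FIELD_B, 2);
+ *	EFX_SET_DWORD_FIELD(dword, FIELD_A, 0);
+ *
+ * leaves FIELD_B at 2.  FIELD_A and FIELD_B are hypothetical names that
+ * stand for real field definitions providing the usual _LBN/_WIDTH
+ * constants.
+ */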
+
+#define EFX_INSERT_FIELD64(_min, _max, _field, _value) \
+ __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD32(_min, _max, _field, _value) \
+ __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD16(_min, _max, _field, _value) \
+ __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD8(_min, _max, _field, _value) \
+ __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value))
+
+#define EFX_INPLACE_MASK64(_min, _max, _field) \
+ EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field))
+
+#define EFX_INPLACE_MASK32(_min, _max, _field) \
+ EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field))
+
+#define EFX_INPLACE_MASK16(_min, _max, _field) \
+ EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field))
+
+#define EFX_INPLACE_MASK8(_min, _max, _field) \
+ EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field))
+
+#define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \
+ ~EFX_INPLACE_MASK64(64, 127, _field)) | \
+ EFX_INSERT_FIELD64(64, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \
+ ~EFX_INPLACE_MASK32(64, 95, _field)) | \
+ EFX_INSERT_FIELD32(64, 95, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \
+ ~EFX_INPLACE_MASK32(96, 127, _field)) | \
+ EFX_INSERT_FIELD32(96, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_DWORD_FIELD(_dword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_WORD_FIELD(_word, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = (((_word).ew_u16[0] & \
+ ~EFX_INPLACE_MASK16(0, 15, _field)) | \
+ EFX_INSERT_FIELD16(0, 15, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_BYTE_FIELD(_byte, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \
+ ~EFX_INPLACE_MASK8(0, 7, _field)) | \
+ EFX_INSERT_FIELD8(0, 7, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Set or clear a numbered bit within an octword.
+ */
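+/*
+ * Illustrative note: EFX_SHIFTnn(_bit, _base) evaluates to a one-bit mask
+ * when _bit lies within the nn-bit lane starting at _base, and to 0
+ * otherwise, so the multi-lane forms below only modify the lane that
+ * actually contains the bit.  For example (with a hypothetical bit
+ * number), EFX_SET_OWORD_BIT32(oword, 70) sets bit 6 of eo_u32[2] and
+ * leaves the other three 32-bit lanes unchanged.
+ */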
+
+#define EFX_SHIFT64(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
+ ((uint64_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT32(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
+ ((uint32_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT16(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
+ (uint16_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT8(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
+ (uint8_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SET_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_OWORD_BIT64(_oword, _bit) \
+ (((_oword).eo_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u64[1] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64)))))
+
+#define EFX_TEST_OWORD_BIT32(_oword, _bit) \
+ (((_oword).eo_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))) || \
+ ((_oword).eo_u32[2] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64)))) || \
+ ((_oword).eo_u32[3] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96)))))
+
+
+#define EFX_SET_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_QWORD_BIT64(_qword, _bit) \
+ (((_qword).eq_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) != 0)
+
+#define EFX_TEST_QWORD_BIT32(_qword, _bit) \
+ (((_qword).eq_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_qword).eq_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))))
+
+
+#define EFX_SET_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_DWORD_BIT(_dword, _bit) \
+ (((_dword).ed_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u16[0] |= \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_WORD_BIT(_word, _bit) \
+ do { \
+		(_word).ew_u16[0] &=					\
+ __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_WORD_BIT(_word, _bit) \
+ (((_word).ew_u16[0] & \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] |= \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] &= \
+ __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_BYTE_BIT(_byte, _bit) \
+ (((_byte).eb_u8[0] & \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_OR_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] |= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_USE_UINT64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT64
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT64
+#define EFX_OR_OWORD EFX_OR_OWORD64
+#define EFX_AND_OWORD EFX_AND_OWORD64
+#define EFX_OR_QWORD EFX_OR_QWORD64
+#define EFX_AND_QWORD EFX_AND_QWORD64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT32
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT32
+#define EFX_OR_OWORD EFX_OR_OWORD32
+#define EFX_AND_OWORD EFX_AND_OWORD32
+#define EFX_OR_QWORD EFX_OR_QWORD32
+#define EFX_AND_QWORD EFX_AND_QWORD32
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_TYPES_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/efx_vpd.c b/src/seastar/dpdk/drivers/net/sfc/base/efx_vpd.c
new file mode 100644
index 00000000..1e47df2c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/efx_vpd.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#define TAG_TYPE_LBN 7
+#define TAG_TYPE_WIDTH 1
+#define TAG_TYPE_LARGE_ITEM_DECODE 1
+#define TAG_TYPE_SMALL_ITEM_DECODE 0
+
+#define TAG_SMALL_ITEM_NAME_LBN 3
+#define TAG_SMALL_ITEM_NAME_WIDTH 4
+#define TAG_SMALL_ITEM_SIZE_LBN 0
+#define TAG_SMALL_ITEM_SIZE_WIDTH 3
+
+#define TAG_LARGE_ITEM_NAME_LBN 0
+#define TAG_LARGE_ITEM_NAME_WIDTH 7
+
+#define TAG_NAME_END_DECODE 0x0f
+#define TAG_NAME_ID_STRING_DECODE 0x02
+#define TAG_NAME_VPD_R_DECODE 0x10
+#define TAG_NAME_VPD_W_DECODE 0x11
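+
+/*
+ * Worked example (informational only): a large-resource VPD-R tag is
+ * encoded as the three bytes 0x90, 0x04, 0x00 - bit 7 of the first byte
+ * selects a large item, bits 6:0 carry the name 0x10 (VPD-R), and the
+ * next two bytes hold the little-endian length (4 here).  The end tag
+ * 0x78 is a small item (bit 7 clear) with name 0x0f (END) and size 0.
+ */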
+
+#if EFSYS_OPT_SIENA
+
+static const efx_vpd_ops_t __efx_vpd_siena_ops = {
+ siena_vpd_init, /* evpdo_init */
+ siena_vpd_size, /* evpdo_size */
+ siena_vpd_read, /* evpdo_read */
+ siena_vpd_verify, /* evpdo_verify */
+ siena_vpd_reinit, /* evpdo_reinit */
+ siena_vpd_get, /* evpdo_get */
+ siena_vpd_set, /* evpdo_set */
+ siena_vpd_next, /* evpdo_next */
+ siena_vpd_write, /* evpdo_write */
+ siena_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
+ ef10_vpd_init, /* evpdo_init */
+ ef10_vpd_size, /* evpdo_size */
+ ef10_vpd_read, /* evpdo_read */
+ ef10_vpd_verify, /* evpdo_verify */
+ ef10_vpd_reinit, /* evpdo_reinit */
+ ef10_vpd_get, /* evpdo_get */
+ ef10_vpd_set, /* evpdo_set */
+ ef10_vpd_next, /* evpdo_next */
+ ef10_vpd_write, /* evpdo_write */
+ ef10_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ evpdop = &__efx_vpd_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (evpdop->evpdo_init != NULL) {
+ if ((rc = evpdop->evpdo_init(enp)) != 0)
+ goto fail2;
+ }
+
+ enp->en_evpdop = evpdop;
+ enp->en_mod_flags |= EFX_MOD_VPD;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_size(enp, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_read(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_reinit == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_write(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_tag(
+ __in caddr_t data,
+ __in size_t size,
+ __inout unsigned int *offsetp,
+ __out efx_vpd_tag_t *tagp,
+ __out uint16_t *lengthp)
+{
+ efx_byte_t byte;
+ efx_word_t word;
+ uint8_t name;
+ uint16_t length;
+ size_t headlen;
+ efx_rc_t rc;
+
+ if (*offsetp >= size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]);
+
+ switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) {
+ case TAG_TYPE_SMALL_ITEM_DECODE:
+ headlen = 1;
+
+ name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME);
+ length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE);
+
+ break;
+
+ case TAG_TYPE_LARGE_ITEM_DECODE:
+ headlen = 3;
+
+ if (*offsetp + headlen > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME);
+ EFX_POPULATE_WORD_2(word,
+ EFX_BYTE_0, data[*offsetp + 1],
+ EFX_BYTE_1, data[*offsetp + 2]);
+ length = EFX_WORD_FIELD(word, EFX_WORD_0);
+
+ break;
+
+ default:
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if (*offsetp + headlen + length > size) {
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END);
+ EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW);
+ if (name != EFX_VPD_END && name != EFX_VPD_ID &&
+ name != EFX_VPD_RO) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ *tagp = name;
+ *lengthp = length;
+ *offsetp += headlen;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_keyword(
+ __in_bcount(size) caddr_t tag,
+ __in size_t size,
+ __in unsigned int pos,
+ __out efx_vpd_keyword_t *keywordp,
+ __out uint8_t *lengthp)
+{
+ efx_vpd_keyword_t keyword;
+ uint8_t length;
+ efx_rc_t rc;
+
+ if (pos + 3U > size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]);
+ length = tag[pos + 2];
+
+ if (length == 0 || pos + 3U + length > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ *keywordp = keyword;
+ *lengthp = length;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp)
+{
+ efx_vpd_tag_t tag;
+ unsigned int offset;
+ uint16_t taglen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ offset += taglen;
+ if (tag == EFX_VPD_END)
+ break;
+ }
+
+ *lengthp = offset;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ boolean_t cksummed = B_FALSE;
+ efx_rc_t rc;
+
+	/*
+	 * Parse every tag and keyword in the existing VPD. If the checksum
+	 * (RV) keyword is present, assert that it is correct and that it is
+	 * the final keyword in the RO block.
+	 */
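+	/*
+	 * (Informational example: with an 'RV' keyword of length 1, the sum
+	 * of every byte from the start of the hunk up to and including the
+	 * RV payload byte must be zero modulo 256, which is what the loop
+	 * below verifies.)
+	 */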
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag == EFX_VPD_ID)
+ goto done;
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ /* RV keyword must be the last in the block */
+ if (cksummed) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail3;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 4; i++)
+ cksum += data[i];
+
+ if (cksum != 0) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ cksummed = B_TRUE;
+ }
+ }
+
+ done:
+ offset += taglen;
+ }
+
+ if (!cksummed) {
+ rc = EFAULT;
+ goto fail5;
+ }
+
+ if (cksummedp != NULL)
+ *cksummedp = cksummed;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint8_t __efx_vpd_blank_pid[] = {
+ /* Large resource type ID length 1 */
+ 0x82, 0x01, 0x00,
+ /* Product name ' ' */
+ 0x32,
+};
+
+static uint8_t __efx_vpd_blank_r[] = {
+ /* Large resource type VPD-R length 4 */
+ 0x90, 0x04, 0x00,
+ /* RV keyword length 1 */
+ 'R', 'V', 0x01,
+ /* RV payload checksum */
+ 0x00,
+};
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid)
+{
+ unsigned int offset = 0;
+ unsigned int pos;
+ efx_byte_t byte;
+ uint8_t cksum;
+ efx_rc_t rc;
+
+ if (size < 0x100) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (wantpid) {
+ memcpy(data + offset, __efx_vpd_blank_pid,
+ sizeof (__efx_vpd_blank_pid));
+ offset += sizeof (__efx_vpd_blank_pid);
+ }
+
+ memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r));
+ offset += sizeof (__efx_vpd_blank_r);
+
+ /* Update checksum */
+ cksum = 0;
+ for (pos = 0; pos < offset; pos++)
+ cksum += data[pos];
+ data[offset - 1] -= cksum;
+
+ /* Append trailing tag */
+ EFX_POPULATE_BYTE_3(byte,
+ TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE,
+ TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE,
+ TAG_SMALL_ITEM_SIZE, 0);
+ data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0);
+ offset++;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keywordp,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword = 0;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int index;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t paylen;
+ efx_rc_t rc;
+
+ offset = index = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+
+ if (tag == EFX_VPD_END) {
+ keyword = 0;
+ paylen = 0;
+ index = 0;
+ break;
+ }
+
+ if (tag == EFX_VPD_ID) {
+ if (index++ == *contp) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+ keyword = 0;
+ paylen = (uint8_t)MIN(taglen, 0xff);
+
+ goto done;
+ }
+ } else {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail2;
+
+ if (index++ == *contp) {
+ offset += pos + 3;
+ paylen = keylen;
+
+ goto done;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+done:
+ *tagp = tag;
+ *keywordp = keyword;
+ if (payloadp != NULL)
+ *payloadp = offset;
+ if (paylenp != NULL)
+ *paylenp = paylen;
+
+ *contp = index;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp)
+{
+ efx_vpd_tag_t itag;
+ efx_vpd_keyword_t ikeyword;
+ unsigned int offset;
+ unsigned int pos;
+ uint16_t taglen;
+ uint8_t keylen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &itag, &taglen)) != 0)
+ goto fail1;
+ if (itag == EFX_VPD_END)
+ break;
+
+ if (itag == tag) {
+ if (itag == EFX_VPD_ID) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+
+ *paylenp = (uint8_t)MIN(taglen, 0xff);
+ *payloadp = offset;
+ return (0);
+ }
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &ikeyword, &keylen)) != 0)
+ goto fail2;
+
+ if (ikeyword == keyword) {
+ *paylenp = keylen;
+ *payloadp = offset + pos + 3;
+ return (0);
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Not an error */
+ return (ENOENT);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_word_t word;
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int taghead;
+ unsigned int source;
+ unsigned int dest;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ size_t used;
+ efx_rc_t rc;
+
+ switch (evvp->evv_tag) {
+ case EFX_VPD_ID:
+ if (evvp->evv_keyword != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Can't delete the ID keyword */
+ if (evvp->evv_length == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ case EFX_VPD_RO:
+ if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Determine total size of all current tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0)
+ goto fail2;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ taghead = offset;
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail3;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag != evvp->evv_tag) {
+ offset += taglen;
+ continue;
+ }
+
+ /* We only support modifying large resource tags */
+ if (offset - taghead != 3) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /*
+ * Work out the offset of the byte immediately after the
+		 * old (=source) and new (=dest) keyword/tag.
+ */
+ pos = 0;
+ if (tag == EFX_VPD_ID) {
+ source = offset + taglen;
+ dest = offset + evvp->evv_length;
+ goto check_space;
+ }
+
+ EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO);
+ source = dest = 0;
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail5;
+
+ if (keyword == evvp->evv_keyword &&
+ evvp->evv_length == 0) {
+ /* Deleting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos;
+ break;
+
+ } else if (keyword == evvp->evv_keyword) {
+ /* Adjusting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+
+ } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ /* The RV keyword must be at the end */
+ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen);
+
+ /*
+ * The keyword doesn't already exist. If the
+				 * user is deleting a non-existent keyword then
+ * this is a no-op.
+ */
+ if (evvp->evv_length == 0)
+ return (0);
+
+ /* Insert this keyword before the RV keyword */
+ source = offset + pos;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+ }
+ }
+
+ check_space:
+ if (used + dest > size + source) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /* Move trailing data */
+ (void) memmove(data + dest, data + source, used - source);
+
+ /* Copy contents */
+ memcpy(data + dest - evvp->evv_length, evvp->evv_value,
+ evvp->evv_length);
+
+ /* Insert new keyword header if required */
+ if (tag != EFX_VPD_ID && evvp->evv_length > 0) {
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0,
+ evvp->evv_keyword);
+ data[offset + pos + 0] =
+ EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset + pos + 1] =
+ EFX_WORD_FIELD(word, EFX_BYTE_1);
+ data[offset + pos + 2] = evvp->evv_length;
+ }
+
+ /* Modify tag length (large resource type) */
+ taglen += (dest - source);
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
+ data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);
+
+ goto checksum;
+ }
+
+ /* Unable to find the matching tag */
+ rc = ENOENT;
+ goto fail7;
+
+checksum:
+ /* Find the RV tag, and update the checksum */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail8;
+ if (tag == EFX_VPD_END)
+ break;
+ if (tag == EFX_VPD_RO) {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail9;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 3; i++)
+ cksum += data[i];
+ data[i] = -cksum;
+ break;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+	/* Fill the unused portion with 0xff */
+ (void) memset(data + offset + taglen, 0xff, size - offset - taglen);
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_fini != NULL)
+ evpdop->evpdo_fini(enp);
+
+ enp->en_evpdop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_VPD;
+}
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/hunt_impl.h b/src/seastar/dpdk/drivers/net/sfc/base/hunt_impl.h
new file mode 100644
index 00000000..0e0c870f
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/hunt_impl.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_HUNT_IMPL_H
+#define _SYS_HUNT_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+#include "efx_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Missing register definitions */
+#ifndef ER_DZ_TX_PIOBUF_OFST
+#define ER_DZ_TX_PIOBUF_OFST 0x00001000
+#endif
+#ifndef ER_DZ_TX_PIOBUF_STEP
+#define ER_DZ_TX_PIOBUF_STEP 8192
+#endif
+#ifndef ER_DZ_TX_PIOBUF_ROWS
+#define ER_DZ_TX_PIOBUF_ROWS 2048
+#endif
+
+#ifndef ER_DZ_TX_PIOBUF_SIZE
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+#endif
+
+#define HUNT_PIOBUF_NBUFS (16)
+#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE)
+
+#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32)
+
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HUNT_IMPL_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/hunt_nic.c b/src/seastar/dpdk/drivers/net/sfc/base/hunt_nic.c
new file mode 100644
index 00000000..addbf1c5
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/hunt_nic.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON
+
+#include "ef10_tlv_layout.h"
+
+static __checkReturn efx_rc_t
+hunt_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t max_port_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * On Huntington, the firmware may not give us the current port mode, so
+ * we need to go by the set of available port modes and assume the most
+ * capable mode is in use.
+ */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+ /* No port mode info available */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {
+ /*
+ * This needs the full PCIe bandwidth (and could use
+ * more) - roughly 64 Gbit/s for 8 lanes of Gen3.
+ */
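+		/*
+		 * (Rough arithmetic: PCIe Gen3 is 8 GT/s per lane with
+		 * 128b/130b encoding, i.e. about 7.9 Gbit/s of usable
+		 * bandwidth per lane, so 8 lanes give roughly 63 Gbit/s.)
+		 */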
+ if ((rc = efx_nic_calculate_pcie_link_bandwidth(8,
+ EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
+ goto fail1;
+ } else {
+ if (port_modes & (1 << TLV_PORT_MODE_40G)) {
+ max_port_mode = TLV_PORT_MODE_40G;
+ } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {
+ max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
+ } else {
+ /* Assume two 10G ports */
+ max_port_mode = TLV_PORT_MODE_10G_10G;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,
+ &bandwidth)) != 0)
+ goto fail2;
+ }
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t flags;
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t base, nvec;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Huntington */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+ /*
+ * If the bug35388 workaround is enabled, then use an indirect access
+ * method to avoid unsafe EVQ writes.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug35388_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug35388_workaround = B_FALSE;
+ else
+ goto fail8;
+
+ /*
+ * If the bug41750 workaround is enabled, then do not test interrupts,
+ * as the test will fail (seen with Greenport controllers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
+ NULL);
+ if (rc == 0) {
+ encp->enc_bug41750_workaround = B_TRUE;
+ } else if (rc == EACCES) {
+ /* Assume a controller with 40G ports needs the workaround. */
+ if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ encp->enc_bug41750_workaround = B_TRUE;
+ else
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else {
+ goto fail9;
+ }
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /* Interrupt testing does not work for VFs. See bug50084. */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /*
+ * If the bug26807 workaround is enabled, then firmware has enabled
+ * support for chained multicast filters. Firmware will reset (FLR)
+ * functions which have filters in the hardware filter table when the
+ * workaround is enabled/disabled.
+ *
+ * We must recheck if the workaround is enabled after inserting the
+ * first hardware filter, in case it has been changed since this check.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
+ B_TRUE, &flags);
+ if (rc == 0) {
+ encp->enc_bug26807_workaround = B_TRUE;
+ if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
+ /*
+ * Other functions had installed filters before the
+ * workaround was enabled, and they have been reset
+ * by firmware.
+ */
+ EFSYS_PROBE(bug26807_workaround_flr_done);
+ /* FIXME: bump MC warm boot count ? */
+ }
+ } else if (rc == EACCES) {
+ /*
+ * Unprivileged functions cannot enable the workaround in older
+ * firmware.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail10;
+ }
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail11;
+
+ /*
+ * The Huntington timer quantum is 1536 sysclk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
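+	/* Maximum timer period is bounded by the timer value field width */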
+ if (encp->enc_bug35388_workaround) {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
+ } else {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+ }
+
+ encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail12;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ /*
+ * The workaround for bug35388 uses the top bit of transmit queue
+ * descriptor writes, preventing the use of 4096 descriptor TXQs.
+ */
+ encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail13;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail14;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
+ goto fail15;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+
+ /* All Huntington devices have a PCIe Gen3, 8 lane connector */
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail15:
+ EFSYS_PROBE(fail15);
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.c b/src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.c
new file mode 100644
index 00000000..c5360c31
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+#define MCDI_MON_NEXT_PAGE ((uint16_t)0xfffe)
+#define MCDI_MON_INVALID_SENSOR ((uint16_t)0xfffd)
+#define MCDI_MON_PAGE_SIZE 0x20
+
+/* Bitmasks of valid port(s) for each sensor */
+#define MCDI_MON_PORT_NONE (0x00)
+#define MCDI_MON_PORT_P1 (0x01)
+#define MCDI_MON_PORT_P2 (0x02)
+#define MCDI_MON_PORT_P3 (0x04)
+#define MCDI_MON_PORT_P4 (0x08)
+#define MCDI_MON_PORT_Px (0xFFFF)
+
+/* Get port mask from one-based MCDI port number */
+#define MCDI_MON_PORT_MASK(_emip) (1U << ((_emip)->emi_port - 1))
+
+/* Entry for MCDI sensor in sensor map */
+#define STAT(portmask, stat) \
+ { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) }
+
+/* Entry for sensor next page flag in sensor map */
+#define STAT_NEXT_PAGE() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE }
+
+/* Placeholder for gaps in the array */
+#define STAT_NO_SENSOR() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR }
+
+/* Map from MC sensors to monitor statistics */
+static const struct mcdi_sensor_map_s {
+ uint16_t msm_port_mask;
+ uint16_t msm_stat;
+} mcdi_sensor_map[] = {
+ /* Sensor page 0 MC_CMD_SENSOR_xxx */
+ STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */
+ STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */
+ STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */
+ STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */
+ STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */
+ STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */
+ STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */
+ STAT(Px, 1V), /* 0x07 IN_1V0 */
+ STAT(Px, 1_2V), /* 0x08 IN_1V2 */
+ STAT(Px, 1_8V), /* 0x09 IN_1V8 */
+ STAT(Px, 2_5V), /* 0x0a IN_2V5 */
+ STAT(Px, 3_3V), /* 0x0b IN_3V3 */
+ STAT(Px, 12V), /* 0x0c IN_12V0 */
+ STAT(Px, 1_2VA), /* 0x0d IN_1V2A */
+ STAT(Px, VREF), /* 0x0e IN_VREF */
+ STAT(Px, VAOE), /* 0x0f OUT_VAOE */
+ STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */
+ STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */
+ STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */
+ STAT(Px, FAN0), /* 0x13 FAN_0 */
+ STAT(Px, FAN1), /* 0x14 FAN_1 */
+ STAT(Px, FAN2), /* 0x15 FAN_2 */
+ STAT(Px, FAN3), /* 0x16 FAN_3 */
+ STAT(Px, FAN4), /* 0x17 FAN_4 */
+ STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */
+ STAT(Px, IAOE), /* 0x19 OUT_IAOE */
+ STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */
+ STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */
+ STAT(Px, 0_9V), /* 0x1c IN_0V9 */
+ STAT(Px, I0_9V), /* 0x1d IN_I0V9 */
+ STAT(Px, I1_2V), /* 0x1e IN_I1V2 */
+ STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */
+
+ /* Sensor page 1 MC_CMD_SENSOR_xxx */
+ STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */
+ STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */
+ STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */
+ STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */
+ STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */
+ STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */
+ STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. INTERNAL_TEMP */
+ STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */
+ STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. INTERNAL_TEMP_EXTADC */
+ STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */
+ STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */
+ STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */
+ STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */
+ STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */
+ STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */
+ STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */
+ STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */
+ STAT(Px, 0V9_A), /* 0x31 0V9_A */
+ STAT(Px, I0V9_A), /* 0x32 I0V9_A */
+ STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */
+ STAT(Px, 0V9_B), /* 0x34 0V9_B */
+ STAT(Px, I0V9_B), /* 0x35 I0V9_B */
+ STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC),
+ /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC),
+ /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x3b (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3c (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3d (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3e (no sensor) */
+ STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */
+
+ /* Sensor page 2 MC_CMD_SENSOR_xxx */
+ STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */
+ STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC),
+ /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC),
+ /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x48 (no sensor) */
+ STAT(Px, SODIMM_VOUT), /* 0x49 SODIMM_VOUT */
+ STAT(Px, SODIMM_0_TEMP), /* 0x4a SODIMM_0_TEMP */
+ STAT(Px, SODIMM_1_TEMP), /* 0x4b SODIMM_1_TEMP */
+ STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */
+ STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */
+ STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
+ STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
+ STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
+};
+
+#define MCDI_STATIC_SENSOR_ASSERT(_field) \
+ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \
+ == EFX_MON_STAT_STATE_ ## _field)
+
+static void
+mcdi_mon_decode_stats(
+ __in efx_nic_t *enp,
+ __in_bcount(sensor_mask_size) uint32_t *sensor_mask,
+ __in size_t sensor_mask_size,
+ __in_opt efsys_mem_t *esmp,
+ __out_bcount_opt(sensor_mask_size) uint32_t *stat_maskp,
+ __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint16_t port_mask;
+ uint16_t sensor;
+ size_t sensor_max;
+ uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32];
+ uint32_t idx = 0;
+ uint32_t page = 0;
+
+ /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */
+ MCDI_STATIC_SENSOR_ASSERT(OK);
+ MCDI_STATIC_SENSOR_ASSERT(WARNING);
+ MCDI_STATIC_SENSOR_ASSERT(FATAL);
+ MCDI_STATIC_SENSOR_ASSERT(BROKEN);
+ MCDI_STATIC_SENSOR_ASSERT(NO_READING);
+
+ EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 ==
+ EFX_MON_MASK_ELEMENT_SIZE);
+ sensor_max =
+ MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map));
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ memset(stat_mask, 0, sizeof (stat_mask));
+
+ /*
+ * The MCDI sensor readings in the DMA buffer are a packed array of
+ * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for
+ * supported sensors (bit set in sensor_mask). The sensor_mask and
+ * sensor readings do not include entries for the per-page NEXT_PAGE
+ * flag.
+ *
+ * sensor_mask may legitimately contain MCDI sensors that the driver
+ * does not understand.
+ */
+ for (sensor = 0; sensor < sensor_max; ++sensor) {
+ efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat;
+
+ if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) {
+ EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE);
+ page++;
+ continue;
+ }
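+		/* Skip sensors that the firmware does not report as present */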
+ if (~(sensor_mask[page]) & (1U << sensor))
+ continue;
+ idx++;
+
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ continue;
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ /*
+ * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic
+ * identifiers from efx_mon_stat_t (without NEXT_PAGE bits).
+ *
+ * If there is an entry in the MCDI sensor to monitor statistic
+ * map then the sensor reading is used for the value of the
+ * monitor statistic.
+ */
+ stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |=
+ (1U << (id % EFX_MON_MASK_ELEMENT_SIZE));
+
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+
+ /* Get MCDI sensor reading from DMA buffer */
+ EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword);
+
+ /* Update EFX monitor stat from MCDI sensor reading */
+ stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ }
+ }
+
+ if (stat_maskp != NULL) {
+ memcpy(stat_maskp, stat_mask, sizeof (stat_mask));
+ }
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint16_t port_mask;
+ uint16_t sensor;
+ uint16_t state;
+ uint16_t value;
+ efx_mon_stat_t id;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR);
+ state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE);
+ value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);
+
+ /* Hardware must support this MCDI sensor */
+ EFSYS_ASSERT3U(sensor, <, (8 * encp->enc_mcdi_sensor_mask_size));
+ EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT);
+ EFSYS_ASSERT(encp->enc_mcdi_sensor_maskp != NULL);
+ EFSYS_ASSERT((encp->enc_mcdi_sensor_maskp[sensor / MCDI_MON_PAGE_SIZE] &
+ (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
+
+ /* But we don't have to understand it */
+ if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ id = mcdi_sensor_map[sensor].msm_stat;
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ return (ENODEV);
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ *idp = id;
+ valuep->emsv_value = value;
+ valuep->emsv_state = state;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_read_sensors(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint32_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN,
+ MC_CMD_READ_SENSORS_EXT_OUT_LEN)];
+ uint32_t addr_lo, addr_hi;
+
+ req.emr_cmd = MC_CMD_READ_SENSORS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN;
+
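+	/* Split the 64-bit sensor DMA buffer address into 32-bit halves */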
+ addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32);
+
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ return (req.emr_rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info_npages(
+ __in efx_nic_t *enp,
+ __out uint32_t *npagesp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ int page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(npagesp != NULL);
+
+ page = 0;
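+	/* Count sensor info pages until a page without the NEXT_PAGE bit */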
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) &
+ (1U << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ *npagesp = page;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info(
+ __in efx_nic_t *enp,
+ __out_ecount(npages) uint32_t *sensor_maskp,
+ __in size_t npages)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ uint32_t page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(sensor_maskp != NULL);
+
+ for (page = 0; page < npages; page++) {
+ uint32_t mask;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
+
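+		/* Every page except the last must have the NEXT_PAGE bit set */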
+ if ((page != (npages - 1)) &&
+ ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ sensor_maskp[page] = mask;
+ }
+
+ if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size = encp->enc_mon_stat_dma_buf_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0)
+ goto fail1;
+
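+	/* Make the sensor readings written by firmware visible to the host */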
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size);
+
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ esmp, NULL, values);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t npages;
+ efx_rc_t rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ encp->enc_mon_type = EFX_MON_SFC90X0;
+ break;
+#endif
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ encp->enc_mon_type = EFX_MON_SFC91X0;
+ break;
+#endif
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get mc sensor mask size */
+ npages = 0;
+ if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0)
+ goto fail2;
+
+ encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE;
+ encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t);
+
+ /* Allocate mc sensor mask */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+ if (encp->enc_mcdi_sensor_maskp == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /* Read mc sensor mask */
+ if ((rc = efx_mcdi_sensor_info(enp,
+ encp->enc_mcdi_sensor_maskp,
+ npages)) != 0)
+ goto fail4;
+
+ /* Build monitor statistics mask */
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ NULL, encp->enc_mon_stat_mask, NULL);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ if (encp->enc_mcdi_sensor_maskp != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+ }
+}
+
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.h b/src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.h
new file mode 100644
index 00000000..e07b5280
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/mcdi_mon.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MCDI_MON_H
+#define _SYS_MCDI_MON_H
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp);
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp);
+
+
+extern __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep);
+
+extern __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MCDI_MON_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/medford_impl.h b/src/seastar/dpdk/drivers/net/sfc/base/medford_impl.h
new file mode 100644
index 00000000..de2f5cf0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/medford_impl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MEDFORD_IMPL_H
+#define _SYS_MEDFORD_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary
+ *
+ * FIXME: Is this the same on Medford as Huntington?
+ */
+#define MEDFORD_RX_WPTR_ALIGN 8
+
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD_PIOBUF_NBUFS (16)
+#define MEDFORD_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD_MIN_PIO_ALLOC_SIZE (MEDFORD_PIOBUF_SIZE / 32)
+
+
+extern __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD_IMPL_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/medford_nic.c b/src/seastar/dpdk/drivers/net/sfc/base/medford_nic.c
new file mode 100644
index 00000000..07afac1e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/medford_nic.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
+ /* RX DMA end padding is disabled */
+ end_padding = 0;
+ } else {
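+		/* Map the MCDI pad-length setting to a byte count */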
+ switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
+ end_padding = 64;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
+ end_padding = 128;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
+ end_padding = 256;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ }
+
+ *end_paddingp = end_padding;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+medford_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t base, nvec;
+ uint32_t end_padding;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * FIXME: Likely to be incomplete and incorrect.
+ * Parts of this should be shared with Huntington.
+ */
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+ /* Disable static config checking for Medford NICs, ONLY
+ * for manufacturing test and setup at the factory, to
+ * allow the static config to be installed.
+ */
+#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Medford */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs. See bug50084.
+ * FIXME: Does this still apply to Medford?
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /*
+ * If the bug61265 workaround is enabled, then interrupt holdoff timers
+ * cannot be controlled by timer table writes, so MCDI must be used
+ * (timer table writes can still be used for wakeup timers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
+ NULL);
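+	/*
+	 * EACCES means an unprivileged function cannot change the setting;
+	 * assume that the workaround is already enabled.
+	 */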
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug61265_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug61265_workaround = B_FALSE;
+ else
+ goto fail8;
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail9;
+
+ /*
+ * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail10;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
+ if (rc != EACCES)
+ goto fail11;
+
+ /* Assume largest tail padding size supported by hardware */
+ end_padding = 256;
+ }
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ /*
+ * The maximum supported transmit queue size is 2048. TXQs with 4096
+ * descriptors are not supported as the top bit is used for vfifo
+ * stuffing.
+ */
+ encp->enc_txq_max_ndescs = 2048;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail12;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail13;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ /*
+ * Medford stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail14;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_flash.h b/src/seastar/dpdk/drivers/net/sfc/base/siena_flash.h
new file mode 100644
index 00000000..e2700554
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_flash.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_FLASH_H
+#define _SYS_SIENA_FLASH_H
+
+#pragma pack(1)
+
+/* Fixed locations near the start of flash (which may be in the internal PHY
+ * firmware header) point to the boot header.
+ *
+ * - parsed by MC boot ROM and firmware
+ * - reserved (but not parsed) by PHY firmware
+ * - opaque to driver
+ */
+
+#define SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20)
+
+#define SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */
+#define SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */
+
+#define SIENA_MC_BOOT_HDR_LEN (0x200)
+
+#define SIENA_MC_BOOT_MAGIC (0x51E4A001)
+#define SIENA_MC_BOOT_VERSION (1)
+
+
+/*
+ * Structures supporting an arbitrary number of binary blobs in the flash
+ * image, intended to house code and tables for the satellite CPUs.
+ */
+/* Thanks to random.org for: */
+#define BLOBS_HEADER_MAGIC (0xBDA3BBD4)
+#define BLOB_HEADER_MAGIC (0xA1478A91)
+
+typedef struct blobs_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t no_of_blobs;
+} blobs_hdr_t;
+
+typedef struct blob_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t cpu_type;
+ efx_dword_t build_variant;
+ efx_dword_t offset;
+ efx_dword_t length;
+ efx_dword_t checksum;
+} blob_hdr_t;
+
+#define BLOB_CPU_TYPE_TXDI_TEXT (0)
+#define BLOB_CPU_TYPE_RXDI_TEXT (1)
+#define BLOB_CPU_TYPE_TXDP_TEXT (2)
+#define BLOB_CPU_TYPE_RXDP_TEXT (3)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT (4)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT_CFG (5)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT (6)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT_CFG (7)
+#define BLOB_CPU_TYPE_RXHRSL_HR_PGM (8)
+#define BLOB_CPU_TYPE_RXHRSL_SL_PGM (9)
+#define BLOB_CPU_TYPE_TXHRSL_HR_PGM (10)
+#define BLOB_CPU_TYPE_TXHRSL_SL_PGM (11)
+#define BLOB_CPU_TYPE_RXDI_VTBL0 (12)
+#define BLOB_CPU_TYPE_TXDI_VTBL0 (13)
+#define BLOB_CPU_TYPE_RXDI_VTBL1 (14)
+#define BLOB_CPU_TYPE_TXDI_VTBL1 (15)
+#define BLOB_CPU_TYPE_DUMPSPEC (32)
+#define BLOB_CPU_TYPE_MC_XIP (33)
+
+#define BLOB_CPU_TYPE_INVALID (31)
+
+/*
+ * The upper four bits of the CPU type field specify the compression
+ * algorithm used for this blob.
+ */
+#define BLOB_COMPRESSION_MASK (0xf0000000)
+#define BLOB_CPU_TYPE_MASK (0x0fffffff)
+
+#define BLOB_COMPRESSION_NONE (0x00000000) /* Stored as is */
+#define BLOB_COMPRESSION_LZ (0x10000000) /* see lib/lzdecoder.c */
+
+typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_BOOT_MAGIC */
+ efx_word_t hdr_version; /* this structure definition is version 1 */
+ efx_byte_t board_type;
+ efx_byte_t firmware_version_a;
+ efx_byte_t firmware_version_b;
+ efx_byte_t firmware_version_c;
+ efx_word_t checksum; /* of whole header area + firmware image */
+ efx_word_t firmware_version_d;
+ efx_byte_t mcfw_subtype;
+ efx_byte_t generation; /* Valid for medford, SBZ for earlier chips */
+ efx_dword_t firmware_text_offset; /* offset to firmware .text */
+ efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */
+ efx_dword_t firmware_data_offset; /* offset to firmware .data */
+ efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */
+ efx_byte_t spi_rate; /* SPI rate for reading image, 0 is BootROM default */
+ efx_byte_t spi_phase_adj; /* SPI SDO/SCL phase adjustment, 0 is default (no adj) */
+	efx_word_t xpm_sector; /* The sector that contains the key, or 0xffff if unsigned (Medford); SBZ for earlier chips */
+ efx_dword_t reserved_c[7]; /* (set to 0) */
+} siena_mc_boot_hdr_t;
+
+#define SIENA_MC_BOOT_HDR_PADDING \
+ (SIENA_MC_BOOT_HDR_LEN - sizeof(siena_mc_boot_hdr_t))
+
+#define SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555)
+#define SIENA_MC_STATIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_static_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_STATIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t static_vpd_offset;
+ efx_dword_t static_vpd_length;
+ efx_dword_t capabilities;
+ efx_byte_t mac_addr_base[6];
+ efx_byte_t green_mode_cal; /* Green mode calibration result */
+ efx_byte_t green_mode_valid; /* Whether cal holds a valid value */
+ efx_word_t mac_addr_count;
+ efx_word_t mac_addr_stride;
+ efx_word_t calibrated_vref; /* Vref as measured during production */
+ efx_word_t adc_vref; /* Vref as read by ADC */
+ efx_dword_t reserved2[1]; /* (write as zero) */
+ efx_dword_t num_dbi_items;
+ struct {
+ efx_word_t addr;
+ efx_word_t byte_enables;
+ efx_dword_t value;
+ } dbi[];
+} siena_mc_static_config_hdr_t;
+
+/* This prefixes a valid XIP partition */
+#define XIP_PARTITION_MAGIC (0x51DEC0DE)
+
+#define SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD)
+#define SIENA_MC_DYNAMIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_fw_version_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t fw_subtype;
+ efx_word_t version_w;
+ efx_word_t version_x;
+ efx_word_t version_y;
+ efx_word_t version_z;
+} siena_mc_fw_version_t;
+
+typedef struct siena_mc_dynamic_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t dynamic_vpd_offset;
+ efx_dword_t dynamic_vpd_length;
+ efx_dword_t num_fw_version_items;
+ siena_mc_fw_version_t fw_version[];
+} siena_mc_dynamic_config_hdr_t;
+
+#define SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55) /* little-endian uint16_t */
+
+#define SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */
+#define SIENA_MC_EXPROM_COMBO_V2_MAGIC (0xB0070103) /* little-endian uint32_t */
+
+typedef struct siena_mc_combo_rom_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_EXPROM_COMBO_MAGIC or SIENA_MC_EXPROM_COMBO_V2_MAGIC */
+ union {
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk0_off;/* infoblk offset */
+ efx_word_t infoblk1_off;/* infoblk offset */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v1;
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk_off;/* infoblk start offset */
+ efx_word_t infoblk_count;/* infoblk count */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v2;
+ } data;
+} siena_mc_combo_rom_hdr_t;
+
+#pragma pack()
+
+#endif /* _SYS_SIENA_FLASH_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_impl.h b/src/seastar/dpdk/drivers/net/sfc/base/siena_impl.h
new file mode 100644
index 00000000..ea6de983
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_impl.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_IMPL_H
+#define _SYS_SIENA_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_mcdi.h"
+#include "siena_flash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SIENA_NVRAM_CHUNK 0x80
+
+extern __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+siena_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+siena_nic_unprobe(
+ __in efx_nic_t *enp);
+
+#define SIENA_SRAM_ROWS 0x12000
+
+extern void
+siena_sram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep);
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+typedef struct siena_link_state_s {
+ uint32_t sls_adv_cap_mask;
+ uint32_t sls_lp_cap_mask;
+ unsigned int sls_fcntl;
+ efx_link_mode_t sls_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t sls_loopback;
+#endif
+ boolean_t sls_mac_up;
+} siena_link_state_t;
+
+extern void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp);
+
+extern __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat);
+
+extern __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SIENA_IMPL_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_mac.c b/src/seastar/dpdk/drivers/net/sfc/base/siena_mac.c
new file mode 100644
index 00000000..29bbff8a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_mac.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_fcntl = sls.sls_fcntl;
+
+ *link_modep = sls.sls_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ /*
+ * Because Siena doesn't *require* polling, we can't rely on
+ * siena_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ *mac_upp = sls.sls_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_oword_t multicast_hash[2];
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN),
+ MAX(MC_CMD_SET_MCAST_HASH_IN_LEN,
+ MC_CMD_SET_MCAST_HASH_OUT_LEN))];
+ unsigned int fcntl;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, !epp->ep_all_unicst,
+ SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);
+
+ if (epp->ep_fcntl_autoneg)
+ /* efx_fcntl_set() has already set the phy capabilities */
+ fcntl = MC_CMD_FCNTL_AUTO;
+ else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
+ fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
+ ? MC_CMD_FCNTL_BIDIR
+ : MC_CMD_FCNTL_RESPOND;
+ else
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* Push multicast hash */
+
+ if (epp->ep_all_mulcst) {
+ /* A hash matching all multicast is all 1s */
+ EFX_SET_OWORD(multicast_hash[0]);
+ EFX_SET_OWORD(multicast_hash[1]);
+ } else if (epp->ep_mulcst) {
+ /* Use the hash set by the multicast list */
+ multicast_hash[0] = epp->ep_multicst_hash[0];
+ multicast_hash[1] = epp->ep_multicst_hash[1];
+ } else {
+ /* A hash matching no traffic is simply 0 */
+ EFX_ZERO_OWORD(multicast_hash[0]);
+ EFX_ZERO_OWORD(multicast_hash[1]);
+ }
+
+ /*
+ * Broadcast packets go through the multicast hash filter.
+ * The IEEE 802.3 CRC32 of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask (bit 0x7f in the
+ * second octword).
+ */
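+	/*
+	 * (Illustrative arithmetic, following the comment above: the hash
+	 * bit index is the low byte of the CRC, 0xbe2612ff & 0xff == 0xff
+	 * == 255.  The two owords cover hash bits 0-127 and 128-255, so
+	 * bit 255 falls in multicast_hash[1] at bit 255 - 128 == 127 ==
+	 * 0x7f.)
+	 */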
+ if (epp->ep_brdcst) {
+ /*
+ * NOTE: due to constant folding, some of this evaluates
+ * to null expressions, giving E_EXPR_NULL_EFFECT during
+		 * lint on Illumos.  There is no good way to fix this without
+		 * explicitly coding the individual word/bit setting, so just
+		 * suppress lint for this one line.
+ */
+ /* LINTED */
+ EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f);
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MCAST_HASH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MCAST_HASH_OUT_LEN;
+
+ memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
+ multicast_hash, sizeof (multicast_hash));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on Siena */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range siena_stats[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ /* EFX_MAC_RX_ERRORS is not supported */
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_TX_EX_DEF_PKTS },
+ };
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ siena_stats, EFX_ARRAY_SIZE(siena_stats))) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
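+/*
+ * The MAC statistics DMA buffer is an array of 64-bit counters indexed by
+ * the MC_CMD_MAC_* constants, so a counter's byte offset is its index
+ * multiplied by sizeof (efx_qword_t), as computed by the macro below.
+ */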
+#define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+ __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+	/*
+	 * Check that the MC did not rewrite the stats buffer while we were
+	 * reading it: GENERATION_END was read before the stats and
+	 * GENERATION_START after them, so a mismatch means an update
+	 * overlapped the read and the caller should retry.  (Comparing just
+	 * the two generation words is not a watertight check.)
+	 */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ return (ENOTSUP);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_mcdi.c b/src/seastar/dpdk/drivers/net/sfc/base/siena_mcdi.c
new file mode 100644
index 00000000..63c29fcb
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_mcdi.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA && EFSYS_OPT_MCDI
+
+#define SIENA_MCDI_PDU(_emip) \
+	(((_emip)->emi_port == 1)				\
+ ? MC_SMEM_P0_PDU_OFST >> 2 \
+ : MC_SMEM_P1_PDU_OFST >> 2)
+
+#define SIENA_MCDI_DOORBELL(_emip) \
+	(((_emip)->emi_port == 1)				\
+ ? MC_SMEM_P0_DOORBELL_OFST >> 2 \
+ : MC_SMEM_P1_DOORBELL_OFST >> 2)
+
+#define SIENA_MCDI_STATUS(_emip) \
+	(((_emip)->emi_port == 1)				\
+ ? MC_SMEM_P0_STATUS_OFST >> 2 \
+ : MC_SMEM_P1_STATUS_OFST >> 2)
+
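+/*
+ * The MC_SMEM_*_OFST values are byte offsets into the MC shared memory
+ * window; the ">> 2" converts them to 32-bit word indices, which is the
+ * granularity used for FR_CZ_MC_TREG_SMEM accesses via
+ * EFX_BAR_TBL_WRITED()/EFX_BAR_TBL_READD() below.
+ */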
+
+ void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ unsigned int pdur;
+ unsigned int dbr;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+ dbr = SIENA_MCDI_DOORBELL(emip);
+
+ /* Write the header */
+ EFSYS_ASSERT3U(hdr_len, ==, sizeof (efx_dword_t));
+ dword = *(efx_dword_t *)hdrp;
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE);
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (pos >> 2), &dword, B_FALSE);
+ }
+
+ /* Ring the doorbell */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE);
+}
+
+ efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+#if 1
+ /*
+ * XXX Bug 25922, bug 26099: This function is not being used
+ * properly. Until its callers are fixed, it should always
+ * return 0.
+ */
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+#else
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int rebootr;
+ efx_dword_t dword;
+ uint32_t value;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ rebootr = SIENA_MCDI_STATUS(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+ value = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ if (value == 0)
+ return (0);
+
+ EFX_ZERO_DWORD(dword);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return (EINTR);
+ else
+ return (EIO);
+#endif
+}
+
+	__checkReturn	boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr;
+ unsigned int pdur;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &hdr, B_FALSE);
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int pdur;
+ unsigned int pos;
+ efx_dword_t data;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + ((offset + pos) >> 2), &data, B_FALSE);
+ memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_oword_t oword;
+ unsigned int portnum;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(mtp))
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine the port number to use for MCDI */
+ EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword);
+ portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
+
+ if (portnum == 0) {
+ /* Presumably booted from ROM; only MCDI port 1 will work */
+ emip->emi_port = 1;
+ } else if (portnum <= 2) {
+ emip->emi_port = portnum;
+ } else {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Siena BootROM and firmware only support MCDIv1 */
+ emip->emi_max_version = 1;
+
+ /*
+ * Wipe the atomic reboot status so subsequent MCDI requests succeed.
+ * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the
+ * assertion handler.
+ */
+ (void) siena_mcdi_poll_reboot(enp);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ *supportedp = B_TRUE;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Default timeout for MCDI command processing (10 seconds). */
+#define SIENA_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+
+ void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ _NOTE(ARGUNUSED(enp, emrp))
+
+ *timeoutp = SIENA_MCDI_CMD_TIMEOUT_US;
+}
+
+
+#endif /* EFSYS_OPT_SIENA && EFSYS_OPT_MCDI */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_nic.c b/src/seastar/dpdk/drivers/net/sfc/base/siena_nic.c
new file mode 100644
index 00000000..129b854b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_nic.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#include "mcdi_mon.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+static __checkReturn efx_rc_t
+siena_nic_get_partn_mask(
+ __in efx_nic_t *enp,
+ __out unsigned int *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN,
+ MC_CMD_NVRAM_TYPES_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TYPES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+static __checkReturn efx_rc_t
+siena_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6];
+ efx_dword_t capabilities;
+ uint32_t board_type;
+ uint32_t nevq, nrxq, ntxq;
+ efx_rc_t rc;
+
+ /* External port identifier using one-based port numbering */
+ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port;
+
+ /* Board configuration */
+ if ((rc = efx_mcdi_get_board_cfg(enp, &board_type,
+ &capabilities, mac_addr)) != 0)
+ goto fail1;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ encp->enc_board_type = board_type;
+
+ /*
+	 * There is no way to determine the number of PFs on Siena via an
+	 * MCDI request, and it is not easy to derive it from the board
+	 * type, so 'enc_hw_pf_count' is set to 1.
+ */
+ encp->enc_hw_pf_count = 1;
+
+ /* Additional capabilities */
+ encp->enc_clk_mult = 1;
+ if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) {
+ enp->en_features |= EFX_FEATURE_TURBO;
+
+ if (EFX_DWORD_FIELD(capabilities,
+ MC_CMD_CAPABILITIES_TURBO_ACTIVE)) {
+ encp->enc_clk_mult = 2;
+ }
+ }
+
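+	/*
+	 * The timer value field is FRF_CZ_TC_TIMER_VAL_WIDTH bits wide, so
+	 * the maximum period computed below is the quantum scaled by
+	 * 2^FRF_CZ_TC_TIMER_VAL_WIDTH, converted from nanoseconds to
+	 * microseconds.
+	 */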
+ encp->enc_evq_timer_quantum_ns =
+ EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult;
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* When hash header insertion is enabled, Siena inserts 16 bytes */
+ encp->enc_rx_prefix_size = 16;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 1;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = 1;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
+ /* Fragments must not span 4k boundaries. */
+ encp->enc_tx_dma_desc_boundary = 4096;
+
+ /* Resource limits */
+ rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq);
+ if (rc != 0) {
+ if (rc != ENOTSUP)
+ goto fail2;
+
+ nevq = 1024;
+ nrxq = EFX_RXQ_LIMIT_TARGET;
+ ntxq = EFX_TXQ_LIMIT_TARGET;
+ }
+ encp->enc_evq_limit = nevq;
+ encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq);
+ encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq);
+
+ encp->enc_txq_max_ndescs = 4096;
+
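+	/*
+	 * The arithmetic below treats the buffer table as sharing Siena's
+	 * SRAM with the TX/RX descriptor caches: the rows reserved for the
+	 * descriptor caches of every permitted queue are subtracted from
+	 * SIENA_SRAM_ROWS.
+	 */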
+ encp->enc_buftbl_limit = SIENA_SRAM_ROWS -
+ (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) -
+ (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = 0;
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ encp->enc_rx_packed_stream_supported = B_FALSE;
+ encp->enc_rx_var_packed_stream_supported = B_FALSE;
+
+ /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */
+ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2;
+
+ encp->enc_fw_verified_nvram_update_required = B_FALSE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail1;
+
+#if EFSYS_OPT_PHY_STATS
+ /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */
+ siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask,
+ NULL, &encp->enc_phy_stat_mask, NULL);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ siena_link_state_t sls;
+ unsigned int mask;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Test BIU */
+ if ((rc = efx_nic_biu_test(enp)) != 0)
+ goto fail1;
+
+ /* Clear the region register */
+ EFX_POPULATE_OWORD_4(oword,
+ FRF_AZ_ADR_REGION0, 0,
+ FRF_AZ_ADR_REGION1, (1 << 16),
+ FRF_AZ_ADR_REGION2, (2 << 16),
+ FRF_AZ_ADR_REGION3, (3 << 16));
+ EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword);
+
+ /* Read clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail2;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail3;
+
+ /* Wrestle control from the BMC */
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail4;
+
+ if ((rc = siena_board_cfg(enp)) != 0)
+ goto fail5;
+
+ if ((rc = siena_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = siena_nic_reset(enp)) != 0)
+ goto fail7;
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail8;
+ epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+ if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0)
+ goto fail9;
+ enp->en_u.siena.enu_partn_mask = mask;
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail10;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail11;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0)
+ goto fail12;
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail12:
+ EFSYS_PROBE(fail12);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail11:
+ EFSYS_PROBE(fail11);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail10:
+ EFSYS_PROBE(fail10);
+#endif
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+fail9:
+ EFSYS_PROBE(fail9);
+#endif
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* siena_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ /*
+	 * Bug24908: ENTITY_RESET_IN_LEN is non-zero, but zero may be supplied
+ * for backwards compatibility with PORT_RESET_IN_LEN.
+ */
+ EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static void
+siena_nic_rx_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * RX_INGR_EN is always enabled on Siena, because we rely on
+	 * the RX parser to be resilient to missing SOP/EOP.
+ */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Disable parsing of additional 802.1Q in Q packets */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_nic_usrev_dis(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1);
+ EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ siena_sram_init(enp);
+
+ /* Configure Siena's RX block */
+ siena_nic_rx_cfg(enp);
+
+ /* Disable USR_EVents for now */
+ siena_nic_usrev_dis(enp);
+
+ /* bug17057: Ensure set_link is called */
+ if ((rc = siena_phy_reconfigure(enp)) != 0)
+ goto fail2;
+
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V1;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_nic_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+siena_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+static efx_register_set_t __siena_registers[] = {
+ { FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
+ { FR_CZ_USR_EV_CFG_OFST, 0, 1 },
+ { FR_AZ_RX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 },
+ { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 },
+ { FR_AZ_DP_CTRL_REG_OFST, 0, 1 },
+ { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1}
+};
+
+static const uint32_t __siena_register_masks[] = {
+ 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF,
+ 0x000103FF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000,
+ 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF,
+ 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF,
+ 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000003, 0x00000000, 0x00000000, 0x00000000,
+ 0x000003FF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000FFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
+};
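+
+/*
+ * Each entry in __siena_registers above corresponds to four consecutive
+ * mask dwords (one 128-bit oword per register); siena_nic_register_test()
+ * checks this with a static assertion before copying the masks in.
+ */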
+
+static efx_register_set_t __siena_tables[] = {
+ { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
+ FR_AZ_RX_FILTER_TBL0_ROWS },
+ { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
+ FR_CZ_RX_MAC_FILTER_TBL0_ROWS },
+ { FR_AZ_RX_DESC_PTR_TBL_OFST,
+ FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TX_DESC_PTR_TBL_OFST,
+ FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
+ { FR_CZ_TX_FILTER_TBL0_OFST,
+ FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS },
+ { FR_CZ_TX_MAC_FILTER_TBL0_OFST,
+ FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS }
+};
+
+static const uint32_t __siena_table_masks[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF,
+ 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000,
+ 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF,
+ 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000,
+};
+
+ __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_register_set_t *rsp;
+ const uint32_t *dwordp;
+ unsigned int nitems;
+ unsigned int count;
+ efx_rc_t rc;
+
+ /* Fill out the register mask entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
+ == EFX_ARRAY_SIZE(__siena_registers) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_registers);
+ dwordp = __siena_register_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_registers + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ /* Fill out the register table entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
+ == EFX_ARRAY_SIZE(__siena_tables) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_tables);
+ dwordp = __siena_table_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_tables + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ if ((rc = efx_nic_test_registers(enp, __siena_registers,
+ EFX_ARRAY_SIZE(__siena_registers))) != 0)
+ goto fail1;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail2;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail3;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_nvram.c b/src/seastar/dpdk/drivers/net/sfc/base/siena_nvram.c
new file mode 100644
index 00000000..af4cf172
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_nvram.c
@@ -0,0 +1,734 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
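+	/*
+	 * Read in chunks of at most SIENA_NVRAM_CHUNK bytes, presumably to
+	 * keep each transfer within a single MCDI NVRAM_READ request.
+	 */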
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ boolean_t reboot;
+ efx_rc_t rc;
+
+ /*
+ * Reboot into the new image only for PHYs. The driver has to
+ * explicitly cope with an MC reboot after a firmware update.
+ */
+ reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
+ partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct siena_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} siena_parttbl_entry_t;
+
+static siena_parttbl_entry_t siena_parttbl[] = {
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 1, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 2, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_CPLD, 1, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_CPLD, 2, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}
+};
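+
+/*
+ * Partitions shared between both ports (for example MC firmware and the
+ * expansion ROM) appear twice above, once per port, so the (port, nvtype)
+ * lookup in siena_nvram_type_to_partn() resolves to the same partition
+ * number from either port.
+ */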
+
+ __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_parttbl_entry_t *entry = &siena_parttbl[i];
+
+ if (entry->port == emip->emi_port && entry->nvtype == type) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_parttbl_entry_t *entry;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /*
+ * Iterate over the list of supported partition types
+ * applicable to *this* port
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ entry = &siena_parttbl[i];
+
+ if (entry->port != emip->emi_port ||
+ !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn)))
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, entry->partn)) != 0) {
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#define SIENA_DYNAMIC_CFG_SIZE(_nitems) \
+ (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \
+ sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0])))
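+
+/*
+ * SIENA_DYNAMIC_CFG_SIZE(n) is the size of the dynamic config header plus
+ * n firmware version entries; it is used both to validate hdr_length
+ * against num_fw_version_items and to size the header when a new version
+ * slot is appended in siena_nvram_partn_set_version().
+ */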
+
+ __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep)
+{
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int nversions;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1);
+
+ /*
+ * Allocate sufficient memory for the entire dynamiccfg area, even
+ * if we're not actually going to read in the VPD.
+ */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
+ if (dcfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic */
+ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
+ != SIENA_MC_DYNAMIC_CONFIG_MAGIC)
+ goto invalid1;
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the partn size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size)
+ goto invalid2;
+
+	/* Verify the header has room for all its versions */
+ if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) ||
+ hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions))
+ goto invalid3;
+
+ /*
+ * Read the remaining portion of the dcfg, either including
+ * the whole of VPD (there is no vpd length in this structure,
+ * so we have to parse each tag), or just the dcfg header itself
+ */
+ region = vpd ? vpd_offset + vpd_length : hdr_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)dcfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail4;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ if (cksum != 0)
+ goto invalid4;
+
+ goto done;
+
+invalid4:
+ EFSYS_PROBE(invalid4);
+invalid3:
+ EFSYS_PROBE(invalid3);
+invalid2:
+ EFSYS_PROBE(invalid2);
+invalid1:
+ EFSYS_PROBE(invalid1);
+
+ /*
+ * Construct a new "null" dcfg, with an empty version vector,
+ * and an empty VPD chunk trailing. This has the neat side effect
+ * of testing the exception paths in the write path.
+ */
+ EFX_POPULATE_DWORD_1(dcfg->magic,
+ EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC);
+ EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg));
+ EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0,
+ SIENA_MC_DYNAMIC_CONFIG_VERSION);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, sizeof (*dcfg));
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0);
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0);
+
+done:
+ *dcfgp = dcfg;
+ *sizep = size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMAX)];
+ efx_word_t *fw_list;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
+ (partn + 1) * sizeof (efx_word_t)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ fw_list = MCDI_OUT2(req, efx_word_t,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ siena_parttbl_entry_t *entry;
+ uint32_t dcfg_partn;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0)
+ goto fail2;
+
+ /*
+	 * Some partitions are accessible from both ports (for instance the
+	 * BOOTROM).  Find the highest version reported by the dcfg structures
+	 * on all ports that have access to this partition.
+ */
+ version[0] = version[1] = version[2] = version[3] = 0;
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_mc_fw_version_t *verp;
+ unsigned int nitems;
+ uint16_t temp[4];
+ size_t length;
+
+ entry = &siena_parttbl[i];
+ if (entry->partn != partn)
+ continue;
+
+ dcfg_partn = (entry->port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+ /*
+		 * Ignore missing partitions on port 2, assuming they are due
+		 * to running on a single-port part.
+ */
+ if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) {
+ if (entry->port == 2)
+ continue;
+ }
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items,
+ EFX_DWORD_0);
+ if (nitems < entry->partn)
+ goto done;
+
+ verp = &dcfg->fw_version[partn];
+ temp[0] = EFX_WORD_FIELD(verp->version_w, EFX_WORD_0);
+ temp[1] = EFX_WORD_FIELD(verp->version_x, EFX_WORD_0);
+ temp[2] = EFX_WORD_FIELD(verp->version_y, EFX_WORD_0);
+ temp[3] = EFX_WORD_FIELD(verp->version_z, EFX_WORD_0);
+ if (memcmp(version, temp, sizeof (temp)) < 0)
+ memcpy(version, temp, sizeof (temp));
+
+done:
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = SIENA_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_unlock(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ siena_mc_fw_version_t *fwverp;
+ uint32_t dcfg_partn;
+ size_t dcfg_size;
+ unsigned int hdr_length;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int nitems;
+ unsigned int required_hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ uint32_t subtype;
+ size_t length;
+ efx_rc_t rc;
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &dcfg_size)) != 0)
+ goto fail1;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ /*
+	 * NOTE: This function will overwrite any fields trailing the version
+	 * vector or the VPD chunk.
+ */
+ required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(partn + 1);
+ if (required_hdr_length + vpd_length > length) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ if (vpd_offset < required_hdr_length) {
+ (void) memmove((caddr_t)dcfg + required_hdr_length,
+ (caddr_t)dcfg + vpd_offset, vpd_length);
+ vpd_offset = required_hdr_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, vpd_offset);
+ }
+
+ if (hdr_length < required_hdr_length) {
+ (void) memset((caddr_t)dcfg + hdr_length, 0,
+ required_hdr_length - hdr_length);
+ hdr_length = required_hdr_length;
+ EFX_POPULATE_WORD_1(dcfg->length,
+ EFX_WORD_0, hdr_length);
+ }
+
+ /* Get the subtype to insert into the fw_subtype array */
+ if ((rc = siena_nvram_get_subtype(enp, partn, &subtype)) != 0)
+ goto fail5;
+
+ /* Fill out the new version */
+ fwverp = &dcfg->fw_version[partn];
+ EFX_POPULATE_DWORD_1(fwverp->fw_subtype, EFX_DWORD_0, subtype);
+ EFX_POPULATE_WORD_1(fwverp->version_w, EFX_WORD_0, version[0]);
+ EFX_POPULATE_WORD_1(fwverp->version_x, EFX_WORD_0, version[1]);
+ EFX_POPULATE_WORD_1(fwverp->version_y, EFX_WORD_0, version[2]);
+ EFX_POPULATE_WORD_1(fwverp->version_z, EFX_WORD_0, version[3]);
+
+ /* Update the version count */
+ if (nitems < partn + 1) {
+ nitems = partn + 1;
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items,
+ EFX_DWORD_0, nitems);
+ }
+
+ /* Update the checksum */
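+	/*
+	 * The header bytes must sum to zero (mod 256); subtracting the
+	 * freshly computed sum from the checksum byte restores that
+	 * invariant after the header edits above (e.g. if the bytes now
+	 * sum to 3, the checksum byte is reduced by 3).
+	 */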
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new partition */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, dcfg_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
+ (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_phy.c b/src/seastar/dpdk/drivers/net/sfc/base/siena_phy.c
new file mode 100644
index 00000000..b90ccabc
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_phy.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static void
+siena_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+siena_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+ void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+	/*
+	 * Convert the LINKCHANGE speed enumeration into Mbit/s, in the
+	 * same way as GET_LINK encodes the speed.
+	 */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ siena_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = siena_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &slsp->sls_adv_cap_mask);
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &slsp->sls_lp_cap_mask);
+
+ siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &slsp->sls_link_mode, &slsp->sls_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+	/* Assert that the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN,
+ MC_CMD_SET_ID_LED_OUT_LEN),
+ MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN))];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ _mc_record, _efx_record) \
+ if ((_vmask) & (1ULL << (_mc_record))) { \
+ (_smask) |= (1ULL << (_efx_record)); \
+ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \
+ efx_dword_t dword; \
+ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\
+ (_stat)[_efx_record] = \
+ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \
+ } \
+ }
+
+#define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \
+ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ MC_CMD_ ## _record, \
+ EFX_PHY_STAT_ ## _record)
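+/*
+ * For example, SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI)
+ * expands to SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_OUI,
+ * EFX_PHY_STAT_OUI): it tests the MC_CMD_OUI bit in vmask, marks
+ * EFX_PHY_STAT_OUI in smask and, if a statistics buffer and DMA area are
+ * supplied, copies the DMA dword at index MC_CMD_OUI into
+ * stat[EFX_PHY_STAT_OUI].
+ */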
+
+ void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ uint64_t smask = 0;
+
+ _NOTE(ARGUNUSED(enp))
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT);
+
+ if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) {
+ smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D));
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sig;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL,
+ &dword);
+ sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A,
+ EFX_PHY_STAT_SNR_A);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B,
+ EFX_PHY_STAT_SNR_B);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C,
+ EFX_PHY_STAT_SNR_C);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D,
+ EFX_PHY_STAT_SNR_D);
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_ALIGN);
+
+ if (vmask & (1 << MC_CMD_PHYXS_SYNC)) {
+ smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_D));
+ if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sync;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword);
+ sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP,
+ EFX_PHY_STAT_CL22EXT_LINK_UP);
+
+ if (smaskp != NULL)
+ *smaskp = smask;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t vmask = encp->enc_mcdi_phy_stat_mask;
+ uint64_t smask;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN,
+ MC_CMD_PHY_STATS_OUT_DMA_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PHY_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
+
+ siena_phy_decode_stats(enp, vmask, esmp, &smask, stat);
+ EFSYS_ASSERT(smask == encp->enc_phy_stat_mask);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn unsigned long
+siena_phy_sft9001_bist_status(
+ __in uint16_t code)
+{
+ switch (code) {
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY:
+ return (EFX_PHY_CABLE_STATUS_BUSY);
+ case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN:
+ return (EFX_PHY_CABLE_STATUS_OPEN);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OK:
+ return (EFX_PHY_CABLE_STATUS_OK);
+ default:
+ return (EFX_PHY_CABLE_STATUS_INVALID);
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ efx_mcdi_req_t req;
+ uint32_t result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ /* Extract PHY specific results */
+ if (result == MC_CMD_POLL_BIST_PASSED &&
+ encp->enc_phy_type == EFX_PHY_SFT9001B &&
+ req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN &&
+ (type == EFX_BIST_TYPE_PHY_CABLE_SHORT ||
+ type == EFX_BIST_TYPE_PHY_CABLE_LONG)) {
+ uint16_t word;
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_A) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_B) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_C) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_D) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_A) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_A] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_B) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_B);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_B] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_C) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_C);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_C] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_D) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_D);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_D] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D);
+ }
+
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_QLX111V &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on Siena */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_sram.c b/src/seastar/dpdk/drivers/net/sfc/base/siena_sram.c
new file mode 100644
index 00000000..572c2e9a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_sram.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ void
+siena_sram_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t rx_base, tx_base;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ rx_base = encp->enc_buftbl_limit;
+ tx_base = rx_base + (encp->enc_rxq_limit *
+ EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ /* Initialize the transmit descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, EFX_TXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword);
+
+ /* Initialize the receive descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, EFX_RXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword);
+
+ /* Set receive descriptor pre-fetch low water mark */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword);
+
+ /* Set the event queue to use for SRAM updates */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func)
+{
+ efx_oword_t oword;
+ efx_qword_t qword;
+ efx_qword_t verify;
+ size_t rows;
+ unsigned int wptr;
+ unsigned int rptr;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Reconfigure into HALF buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+	/*
+	 * Move the descriptor caches up to the top of SRAM, and test
+	 * all of SRAM below them. Only one row is left untested.
+	 */
+ rows = SIENA_SRAM_ROWS - 1;
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+	/*
+	 * Write the pattern through BUF_HALF_TBL in 64-entry batches,
+	 * waiting 1us between batches to guarantee that the SRAM FIFO
+	 * does not overflow.
+	 */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_FALSE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_FALSE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+ }
+
+ /* And do the same negated */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_TRUE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_TRUE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+ }
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+	/*
+	 * We don't need to reconfigure SRAM again because the API
+	 * requires efx_nic_fini() to be called after an SRAM test.
+	 */
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/seastar/dpdk/drivers/net/sfc/base/siena_vpd.c b/src/seastar/dpdk/drivers/net/sfc/base/siena_vpd.c
new file mode 100644
index 00000000..4fb2e426
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/base/siena_vpd.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_vpd_get_static(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __deref_out_bcount_opt(*sizep) caddr_t *svpdp,
+ __out size_t *sizep)
+{
+ siena_mc_static_config_hdr_t *scfg;
+ caddr_t svpd;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);
+
+ /* Allocate sufficient memory for the entire static cfg area */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
+ if (scfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic number */
+ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
+ SIENA_MC_STATIC_CONFIG_MAGIC) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
+ vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the sector size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Read the remainder of scfg + static vpd */
+ region = vpd_offset + vpd_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)scfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail6;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)scfg)[pos];
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ if (vpd_length == 0)
+ svpd = NULL;
+ else {
+ /* Copy the vpd data out */
+ EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
+ if (svpd == NULL) {
+ rc = ENOMEM;
+ goto fail8;
+ }
+ memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+ *svpdp = svpd;
+ *sizep = vpd_length;
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ caddr_t svpd = NULL;
+ unsigned int partn;
+ size_t size = 0;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;
+
+	/*
+	 * We need the static VPD sector to present a unified static+dynamic
+	 * VPD on every read, write and verify cycle. Since it should
+	 * *never* change, we can just cache it here.
+	 */
+ if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
+ goto fail1;
+
+ if (svpd != NULL && size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_u.siena.enu_svpd = svpd;
+ enp->en_u.siena.enu_svpd_length = size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+	/*
+	 * This function returns the total size the user should allocate
+	 * for all VPD operations. We've already cached the static VPD,
+	 * so we just need to return an upper bound on the dynamic VPD.
+	 * Since the dynamic_config structure can change under our feet
+	 * (as version numbers are inserted), be safe and return the
+	 * total size of the dynamic_config *sector*.
+	 */
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ size_t dcfg_size;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &dcfg_size)) != 0)
+ goto fail1;
+
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ if (vpd_length > size) {
+		rc = EFAULT;	/* Invalid dcfg: VPD area larger than the sector */
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(vpd_length, <=, size);
+ memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + vpd_length, 0xff, size - vpd_length);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+	/*
+	 * Strictly speaking, dynamic VPD could be treated as optional.
+	 * However, to conform more closely to the read/verify/reinit()
+	 * paradigm, we require dynamic VPD. siena_vpd_reinit() will
+	 * reinitialize it as required.
+	 */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+	/*
+	 * Only create a product ID if the static cfg doesn't already
+	 * provide one.
+	 */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_u.siena.enu_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ unsigned int hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ size_t partn_size, dcfg_size;
+ size_t vpd_length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine total length of all tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Lock dynamic config sector for write, and read structure only */
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail3;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &dcfg_size)) != 0)
+ goto fail4;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+
+ /* Allocated memory should have room for the new VPD */
+ if (hdr_length + vpd_length > dcfg_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Copy in new vpd and update header */
+ vpd_offset = dcfg_size - vpd_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset);
+ memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, vpd_length);
+
+ /* Update the checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new sector */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg,
+ vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
+ enp->en_u.siena.enu_svpd);
+
+ enp->en_u.siena.enu_svpd = NULL;
+ enp->en_u.siena.enu_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/src/seastar/dpdk/drivers/net/sfc/efsys.h b/src/seastar/dpdk/drivers/net/sfc/efsys.h
new file mode 100644
index 00000000..0405d02b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/efsys.h
@@ -0,0 +1,780 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_COMMON_EFSYS_H
+#define _SFC_COMMON_EFSYS_H
+
+#include <stdbool.h>
+
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "sfc_debug.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFSYS_HAS_UINT64 1
+#define EFSYS_USE_UINT64 1
+#define EFSYS_HAS_SSE2_M128 1
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 1
+#define EFSYS_IS_LITTLE_ENDIAN 0
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 0
+#define EFSYS_IS_LITTLE_ENDIAN 1
+#else
+#error "Cannot determine system endianness"
+#endif
+#include "efx_types.h"
+
+
+#ifndef _NOTE
+#define _NOTE(s)
+#endif
+
+typedef bool boolean_t;
+
+#ifndef B_FALSE
+#define B_FALSE false
+#endif
+#ifndef B_TRUE
+#define B_TRUE true
+#endif
+
+/*
+ * RTE_MAX() and RTE_MIN() cannot be used here, since a braced-group
+ * within an expression is allowed only inside a function, but MAX()
+ * is used to specify the number of elements in an array.
+ */
+#ifndef MAX
+#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
+#endif
+#ifndef MIN
+#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
+#endif
+
+/* DPDK provides its own alignment macros, but they would need a proper
+ * mapping onto the names below before they could be reused here.
+ */
+#ifndef IS_P2ALIGNED
+#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
+#endif
+
+#ifndef P2ROUNDUP
+#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
+#endif
+
+#ifndef P2ALIGN
+#define P2ALIGN(_x, _a) ((_x) & -(_a))
+#endif
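+
+/*
+ * For example, P2ROUNDUP(5, 4) == 8 and P2ALIGN(5, 4) == 4; both macros
+ * assume that the alignment argument is a power of two.
+ */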
+
+#ifndef ISP2
+#define ISP2(x) rte_is_power_of_2(x)
+#endif
+
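+/*
+ * ENOTACTIVE is used by the common code but is not provided by the DPDK
+ * build environment, so map it to a close POSIX error code.
+ */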
+#define ENOTACTIVE ENOTCONN
+
+static inline void
+prefetch_read_many(const volatile void *addr)
+{
+ rte_prefetch0(addr);
+}
+
+static inline void
+prefetch_read_once(const volatile void *addr)
+{
+ rte_prefetch_non_temporal(addr);
+}
+
+/* Modifiers used for Windows builds */
+#define __in
+#define __in_opt
+#define __in_ecount(_n)
+#define __in_ecount_opt(_n)
+#define __in_bcount(_n)
+#define __in_bcount_opt(_n)
+
+#define __out
+#define __out_opt
+#define __out_ecount(_n)
+#define __out_ecount_opt(_n)
+#define __out_bcount(_n)
+#define __out_bcount_opt(_n)
+
+#define __deref_out
+
+#define __inout
+#define __inout_opt
+#define __inout_ecount(_n)
+#define __inout_ecount_opt(_n)
+#define __inout_bcount(_n)
+#define __inout_bcount_opt(_n)
+#define __inout_bcount_full_opt(_n)
+
+#define __deref_out_bcount_opt(n)
+
+#define __checkReturn
+#define __success(_x)
+
+#define __drv_when(_p, _c)
+
+/* Code inclusion options */
+
+
+#define EFSYS_OPT_NAMES 1
+
+/* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
+#define EFSYS_OPT_SIENA 0
+/* Enable SFN7xxx support */
+#define EFSYS_OPT_HUNTINGTON 1
+/* Enable SFN8xxx support */
+#define EFSYS_OPT_MEDFORD 1
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_OPT_CHECK_REG 1
+#else
+#define EFSYS_OPT_CHECK_REG 0
+#endif
+
+/* MCDI is required for SFN7xxx and SFN8xxx */
+#define EFSYS_OPT_MCDI 1
+#define EFSYS_OPT_MCDI_LOGGING 1
+#define EFSYS_OPT_MCDI_PROXY_AUTH 1
+
+#define EFSYS_OPT_MAC_STATS 1
+
+#define EFSYS_OPT_LOOPBACK 0
+
+#define EFSYS_OPT_MON_MCDI 0
+#define EFSYS_OPT_MON_STATS 0
+
+#define EFSYS_OPT_PHY_STATS 0
+#define EFSYS_OPT_BIST 0
+#define EFSYS_OPT_PHY_LED_CONTROL 0
+#define EFSYS_OPT_PHY_FLAGS 0
+
+#define EFSYS_OPT_VPD 0
+#define EFSYS_OPT_NVRAM 0
+#define EFSYS_OPT_BOOTCFG 0
+
+#define EFSYS_OPT_DIAG 0
+#define EFSYS_OPT_RX_SCALE 1
+#define EFSYS_OPT_QSTATS 0
+/* Filter support is required for SFN7xxx and SFN8xxx */
+#define EFSYS_OPT_FILTER 1
+#define EFSYS_OPT_RX_SCATTER 0
+
+#define EFSYS_OPT_EV_PREFETCH 0
+
+#define EFSYS_OPT_DECODE_INTR_FATAL 0
+
+#define EFSYS_OPT_LICENSING 0
+
+#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
+
+#define EFSYS_OPT_RX_PACKED_STREAM 0
+
+/* ID */
+
+typedef struct __efsys_identifier_s efsys_identifier_t;
+
+
+#define EFSYS_PROBE(_name) \
+ do { } while (0)
+
+#define EFSYS_PROBE1(_name, _type1, _arg1) \
+ do { } while (0)
+
+#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
+ do { } while (0)
+
+#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3) \
+ do { } while (0)
+
+#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4) \
+ do { } while (0)
+
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5) \
+ do { } while (0)
+
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6) \
+ do { } while (0)
+
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7) \
+ do { } while (0)
+
+
+/* DMA */
+
+typedef phys_addr_t efsys_dma_addr_t;
+
+typedef struct efsys_mem_s {
+ const struct rte_memzone *esm_mz;
+	/*
+	 * Ideally this should have a volatile qualifier to denote that
+	 * the memory may be updated by someone else. However, that adds
+	 * qualifier-discard warnings when the pointer or its derivative
+	 * is passed to memset() or rte_mov16().
+	 * So, skip the qualifier here, but make sure that it is added
+	 * below in the access macros.
+	 */
+ void *esm_base;
+ efsys_dma_addr_t esm_addr;
+} efsys_mem_t;
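+
+/*
+ * Illustrative note (an assumption about PMD usage, not part of the
+ * common-code contract): the PMD is expected to fill this structure in
+ * from a reserved DPDK memzone, taking esm_base from the zone's virtual
+ * address and esm_addr from its physical (DMA) address.
+ */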
+
+
+#define EFSYS_MEM_ZERO(_esmp, _size) \
+ do { \
+ (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ (_edp)->ed_u32[0] = _addr[0]; \
+ \
+ EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ (_eqp)->eq_u64[0] = _addr[0]; \
+ \
+ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ _addr[0] = (_edp)->ed_u32[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ _addr[0] = (_eqp)->eq_u64[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ \
+ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_ADDR(_esmp) \
+ ((_esmp)->esm_addr)
+
+#define EFSYS_MEM_IS_NULL(_esmp) \
+ ((_esmp)->esm_base == NULL)
+
+#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ \
+ rte_prefetch0(_base + (_offset)); \
+ } while (0)
+
+
+/* BAR */
+
+typedef struct efsys_bar_s {
+ rte_spinlock_t esb_lock;
+ int esb_rid;
+ struct rte_pci_device *esb_dev;
+	/*
+	 * Ideally this should have a volatile qualifier to denote that
+	 * the memory may be updated by someone else. However, that adds
+	 * qualifier-discard warnings when the pointer or its derivative
+	 * is passed to memset() or rte_mov16().
+	 * So, skip the qualifier here, but make sure that it is added
+	 * below in the access macros.
+	 */
+ void *esb_base;
+} efsys_bar_t;
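+
+/*
+ * Illustrative note (an assumption about PMD usage): esb_base is expected
+ * to point at the mapped PCI BAR (e.g. the rte_pci_device mem_resource
+ * address), esb_rid holds the BAR index and esb_lock is used by the
+ * access macros to serialise register accesses where required.
+ */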
+
+#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
+ do { \
+ rte_spinlock_init(&(_esbp)->esb_lock); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
+#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
+#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
+
+#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
+ \
+ EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
+ \
+ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ rte_rmb(); \
+ /* There is no rte_read128_relaxed() yet */ \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
+ rte_wmb(); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/*
+ * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR
+ * mapping (required by PIO hardware).
+ *
+ * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports
+ * write-combined memory mapped to user space, so just abort if it is
+ * used.
+ */
+#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ rte_panic("Write-combined BAR access not supported"); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ /* There is no rte_write128_relaxed() yet */ \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* Use the standard octo-word write for doorbell writes */
+#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
+ do { \
+ EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
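
For orientation, a minimal usage sketch of the unlocked dword variant defined above; the offset and value are purely illustrative and not taken from the driver.

static void
example_bar_writed(efsys_bar_t *esbp, uint32_t value)
{
	efx_dword_t ed;

	/* Device registers are little-endian */
	ed.ed_u32[0] = rte_cpu_to_le_32(value);
	/* B_FALSE: the caller does not need the BAR lock here */
	EFSYS_BAR_WRITED(esbp, 0x100, &ed, B_FALSE);
}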
+
+/* SPIN */
+
+#define EFSYS_SPIN(_us) \
+ do { \
+ rte_delay_us(_us); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_SLEEP EFSYS_SPIN
+
+/* BARRIERS */
+
+#define EFSYS_MEM_READ_BARRIER() rte_rmb()
+#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
+
+/* DMA SYNC */
+
+/*
+ * DPDK does not provide any DMA syncing API, and no PMD driver
+ * contains any trace of explicit DMA syncing.
+ * DMA mappings are assumed to be coherent.
+ */
+
+#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
+
+/* Just avoid store reordering and implicit compiler reordering */
+#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
+
+/* TIMESTAMP */
+
+typedef uint64_t efsys_timestamp_t;
+
+#define EFSYS_TIMESTAMP(_usp) \
+ do { \
+ *(_usp) = rte_get_timer_cycles() * 1000000 / \
+ rte_get_timer_hz(); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* KMEM */
+
+#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
+ do { \
+ (_esip) = (_esip); \
+ (_p) = rte_zmalloc("sfc", (_size), 0); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_KMEM_FREE(_esip, _size, _p) \
+ do { \
+ (void)(_esip); \
+ (void)(_size); \
+ rte_free((_p)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* LOCK */
+
+typedef rte_spinlock_t efsys_lock_t;
+
+#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
+ rte_spinlock_init((_eslp))
+#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
+#define SFC_EFSYS_LOCK(_eslp) \
+ rte_spinlock_lock((_eslp))
+#define SFC_EFSYS_UNLOCK(_eslp) \
+ rte_spinlock_unlock((_eslp))
+#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
+ SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
+
+typedef int efsys_lock_state_t;
+
+#define EFSYS_LOCK_MAGIC 0x000010c4
+
+#define EFSYS_LOCK(_lockp, _state) \
+ do { \
+ SFC_EFSYS_LOCK(_lockp); \
+ (_state) = EFSYS_LOCK_MAGIC; \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_UNLOCK(_lockp, _state) \
+ do { \
+ SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
+ SFC_EFSYS_UNLOCK(_lockp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
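
A brief sketch of how the lock pair above is meant to be used by the common code; the lock object here is local and illustrative.

static void
example_locked_section(void)
{
	efsys_lock_t lock;
	efsys_lock_state_t state;

	rte_spinlock_init(&lock);

	EFSYS_LOCK(&lock, state);
	/* ... touch state shared with other execution contexts ... */
	EFSYS_UNLOCK(&lock, state);
}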
+
+/* STAT */
+
+typedef uint64_t efsys_stat_t;
+
+#define EFSYS_STAT_INCR(_knp, _delta) \
+ do { \
+ *(_knp) += (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_DECR(_knp, _delta) \
+ do { \
+ *(_knp) -= (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET(_knp, _val) \
+ do { \
+ *(_knp) = (_val); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* ERR */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
+ do { \
+ (void)(_esip); \
+ RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
+ (_code), (_dword0), (_dword1)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#endif
+
+/* ASSERT */
+
+/* RTE_VERIFY from DPDK treats expressions with the % operator
+ * incorrectly, so we re-implement it here
+ */
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_ASSERT(_exp) \
+ do { \
+ if (unlikely(!(_exp))) \
+ rte_panic("line %d\tassert \"%s\" failed\n", \
+ __LINE__, (#_exp)); \
+ } while (0)
+#else
+#define EFSYS_ASSERT(_exp) (void)(_exp)
+#endif
+
+#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))
+
+#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
+#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
+#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
+
+/* ROTATE */
+
+#define EFSYS_HAS_ROTL_DWORD 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_COMMON_EFSYS_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/rte_pmd_sfc_efx_version.map b/src/seastar/dpdk/drivers/net/sfc/rte_pmd_sfc_efx_version.map
new file mode 100644
index 00000000..31eca32e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/rte_pmd_sfc_efx_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc.c b/src/seastar/dpdk/drivers/net/sfc/sfc.c
new file mode 100644
index 00000000..4e241b22
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc.c
@@ -0,0 +1,750 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* sysconf() */
+#include <unistd.h>
+
+#include <rte_errno.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+
+
+int
+sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp)
+{
+ const struct rte_memzone *mz;
+
+ sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
+ name, id, len, socket_id);
+
+ mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
+ sysconf(_SC_PAGESIZE), socket_id);
+ if (mz == NULL) {
+ sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
+ name, (unsigned int)id, (unsigned int)len, socket_id,
+ rte_strerror(rte_errno));
+ return ENOMEM;
+ }
+
+ esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
+ (void)rte_memzone_free(mz);
+ return EFAULT;
+ }
+
+ esmp->esm_mz = mz;
+ esmp->esm_base = mz->addr;
+
+ return 0;
+}
+
+void
+sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
+{
+ int rc;
+
+ sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
+
+ rc = rte_memzone_free(esmp->esm_mz);
+ if (rc != 0)
+		sfc_err(sa, "rte_memzone_free() failed: %d", rc);
+
+ memset(esmp, 0, sizeof(*esmp));
+}
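
A usage sketch of the pair above, assuming an initialized adapter; the zone name and length are hypothetical.

static int
example_dma_usage(struct sfc_adapter *sa)
{
	efsys_mem_t esm;
	int rc;

	rc = sfc_dma_alloc(sa, "example", 0, 4096, sa->socket_id, &esm);
	if (rc != 0)
		return rc;	/* positive errno */

	/* esm.esm_base is the virtual address, esm.esm_addr the bus address */

	sfc_dma_free(sa, &esm);
	return 0;
}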
+
+static uint32_t
+sfc_phy_cap_from_link_speeds(uint32_t speeds)
+{
+ uint32_t phy_caps = 0;
+
+ if (~speeds & ETH_LINK_SPEED_FIXED) {
+ phy_caps |= (1 << EFX_PHY_CAP_AN);
+ /*
+		 * If no speeds are specified in the mask, any supported
+		 * speed may be negotiated
+ */
+ if (speeds == ETH_LINK_SPEED_AUTONEG)
+ phy_caps |=
+ (1 << EFX_PHY_CAP_1000FDX) |
+ (1 << EFX_PHY_CAP_10000FDX) |
+ (1 << EFX_PHY_CAP_40000FDX);
+ }
+ if (speeds & ETH_LINK_SPEED_1G)
+ phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
+ if (speeds & ETH_LINK_SPEED_10G)
+ phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
+ if (speeds & ETH_LINK_SPEED_40G)
+ phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
+
+ return phy_caps;
+}
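
As a worked example (using the standard rte_ethdev speed flags): a link_speeds value of ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G with ETH_LINK_SPEED_FIXED clear maps to (1 << EFX_PHY_CAP_AN) | (1 << EFX_PHY_CAP_10000FDX) | (1 << EFX_PHY_CAP_40000FDX), i.e. autonegotiation stays enabled but is restricted to the listed speeds.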
+
+/*
+ * Check requested device level configuration.
+ * Receive and transmit configuration is checked in corresponding
+ * modules.
+ */
+static int
+sfc_check_conf(struct sfc_adapter *sa)
+{
+ const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
+ int rc = 0;
+
+ sa->port.phy_adv_cap =
+ sfc_phy_cap_from_link_speeds(conf->link_speeds) &
+ sa->port.phy_adv_cap_mask;
+ if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
+ sfc_err(sa, "No link speeds from mask %#x are supported",
+ conf->link_speeds);
+ rc = EINVAL;
+ }
+
+ if (conf->lpbk_mode != 0) {
+ sfc_err(sa, "Loopback not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->dcb_capability_en != 0) {
+ sfc_err(sa, "Priority-based flow control not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ sfc_err(sa, "Flow Director not supported");
+ rc = EINVAL;
+ }
+
+ if ((conf->intr_conf.lsc != 0) &&
+ (sa->intr.type != EFX_INTR_LINE) &&
+ (sa->intr.type != EFX_INTR_MESSAGE)) {
+ sfc_err(sa, "Link status change interrupt not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->intr_conf.rxq != 0) {
+ sfc_err(sa, "Receive queue interrupt not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * Find out maximum number of receive and transmit queues which could be
+ * advertised.
+ *
+ * The NIC is kept initialized on success to allow other modules to
+ * acquire defaults and capabilities.
+ */
+static int
+sfc_estimate_resource_limits(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ efx_drv_limits_t limits;
+ int rc;
+ uint32_t evq_allocated;
+ uint32_t rxq_allocated;
+ uint32_t txq_allocated;
+
+ memset(&limits, 0, sizeof(limits));
+
+ /* Request at least one Rx and Tx queue */
+ limits.edl_min_rxq_count = 1;
+ limits.edl_min_txq_count = 1;
+ /* Management event queue plus event queue for each Tx and Rx queue */
+ limits.edl_min_evq_count =
+ 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
+
+	/* Divide by the number of functions to guarantee that all
+	 * functions will get the promised resources
+ */
+ /* FIXME Divide by number of functions (not 2) below */
+ limits.edl_max_evq_count = encp->enc_evq_limit / 2;
+ SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
+
+ /* Split equally between receive and transmit */
+ limits.edl_max_rxq_count =
+ MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
+ SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
+
+ limits.edl_max_txq_count =
+ MIN(encp->enc_txq_limit,
+ limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
+
+ if (sa->tso)
+ limits.edl_max_txq_count =
+ MIN(limits.edl_max_txq_count,
+ encp->enc_fw_assisted_tso_v2_n_contexts /
+ encp->enc_hw_pf_count);
+
+ SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
+
+	/* Configure the minimum resources required for the driver to
+	 * operate, and the maximum desired resources that the driver is
+	 * capable of using.
+ */
+ efx_nic_set_drv_limits(sa->nic, &limits);
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ /* Find resource dimensions assigned by firmware to this function */
+ rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
+ &txq_allocated);
+ if (rc != 0)
+ goto fail_get_vi_pool;
+
+	/* It may still allocate more than the maximum, so enforce the limits */
+ evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
+ rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
+ txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
+
+ /* Subtract management EVQ not used for traffic */
+ SFC_ASSERT(evq_allocated > 0);
+ evq_allocated--;
+
+ /* Right now we use separate EVQ for Rx and Tx */
+ sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
+ sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
+
+ /* Keep NIC initialized */
+ return 0;
+
+fail_get_vi_pool:
+fail_nic_init:
+ efx_nic_fini(sa->nic);
+ return rc;
+}
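
To illustrate the split above with hypothetical numbers (not firmware defaults): if enc_evq_limit were 64 and enc_rxq_limit = enc_txq_limit = 32, the driver would request at most 64 / 2 = 32 EvQs, MIN(32, (32 - 1) / 2) = 15 RxQs and MIN(32, 32 - 1 - 15) = 16 TxQs; whatever efx_nic_get_vi_pool() then reports is clamped to these maxima, one EvQ is reserved for management, and the remainder is split between Rx and Tx.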
+
+static int
+sfc_set_drv_limits(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *data = sa->eth_dev->data;
+ efx_drv_limits_t lim;
+
+ memset(&lim, 0, sizeof(lim));
+
+	/* Limits are strict since they take the initial estimation into account */
+ lim.edl_min_evq_count = lim.edl_max_evq_count =
+ 1 + data->nb_rx_queues + data->nb_tx_queues;
+ lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
+ lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
+
+ return efx_nic_set_drv_limits(sa->nic, &lim);
+}
+
+int
+sfc_start(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ break;
+ case SFC_ADAPTER_STARTED:
+ sfc_info(sa, "already started");
+ return 0;
+ default:
+ rc = EINVAL;
+ goto fail_bad_state;
+ }
+
+ sa->state = SFC_ADAPTER_STARTING;
+
+ sfc_log_init(sa, "set resource limits");
+ rc = sfc_set_drv_limits(sa);
+ if (rc != 0)
+ goto fail_set_drv_limits;
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ rc = sfc_intr_start(sa);
+ if (rc != 0)
+ goto fail_intr_start;
+
+ rc = sfc_ev_start(sa);
+ if (rc != 0)
+ goto fail_ev_start;
+
+ rc = sfc_port_start(sa);
+ if (rc != 0)
+ goto fail_port_start;
+
+ rc = sfc_rx_start(sa);
+ if (rc != 0)
+ goto fail_rx_start;
+
+ rc = sfc_tx_start(sa);
+ if (rc != 0)
+ goto fail_tx_start;
+
+ rc = sfc_flow_start(sa);
+ if (rc != 0)
+ goto fail_flows_insert;
+
+ sa->state = SFC_ADAPTER_STARTED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_flows_insert:
+ sfc_tx_stop(sa);
+
+fail_tx_start:
+ sfc_rx_stop(sa);
+
+fail_rx_start:
+ sfc_port_stop(sa);
+
+fail_port_start:
+ sfc_ev_stop(sa);
+
+fail_ev_start:
+ sfc_intr_stop(sa);
+
+fail_intr_start:
+ efx_nic_fini(sa->nic);
+
+fail_nic_init:
+fail_set_drv_limits:
+ sa->state = SFC_ADAPTER_CONFIGURED;
+fail_bad_state:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ break;
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_info(sa, "already stopped");
+ return;
+ default:
+ sfc_err(sa, "stop in unexpected state %u", sa->state);
+ SFC_ASSERT(B_FALSE);
+ return;
+ }
+
+ sa->state = SFC_ADAPTER_STOPPING;
+
+ sfc_flow_stop(sa);
+ sfc_tx_stop(sa);
+ sfc_rx_stop(sa);
+ sfc_port_stop(sa);
+ sfc_ev_stop(sa);
+ sfc_intr_stop(sa);
+ efx_nic_fini(sa->nic);
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_configure(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
+ sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CONFIGURING;
+
+ rc = sfc_check_conf(sa);
+ if (rc != 0)
+ goto fail_check_conf;
+
+ rc = sfc_intr_configure(sa);
+ if (rc != 0)
+ goto fail_intr_configure;
+
+ rc = sfc_port_configure(sa);
+ if (rc != 0)
+ goto fail_port_configure;
+
+ rc = sfc_rx_configure(sa);
+ if (rc != 0)
+ goto fail_rx_configure;
+
+ rc = sfc_tx_configure(sa);
+ if (rc != 0)
+ goto fail_tx_configure;
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_tx_configure:
+ sfc_rx_close(sa);
+
+fail_rx_configure:
+ sfc_port_close(sa);
+
+fail_port_configure:
+ sfc_intr_close(sa);
+
+fail_intr_configure:
+fail_check_conf:
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CLOSING;
+
+ sfc_tx_close(sa);
+ sfc_rx_close(sa);
+ sfc_port_close(sa);
+ sfc_intr_close(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_mem_bar_init(struct sfc_adapter *sa)
+{
+ struct rte_eth_dev *eth_dev = sa->eth_dev;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(eth_dev);
+ efsys_bar_t *ebp = &sa->mem_bar;
+ unsigned int i;
+ struct rte_mem_resource *res;
+
+ for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
+ res = &pci_dev->mem_resource[i];
+ if ((res->len != 0) && (res->phys_addr != 0)) {
+ /* Found first memory BAR */
+ SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
+ ebp->esb_rid = i;
+ ebp->esb_dev = pci_dev;
+ ebp->esb_base = res->addr;
+ return 0;
+ }
+ }
+
+ return EFAULT;
+}
+
+static void
+sfc_mem_bar_fini(struct sfc_adapter *sa)
+{
+ efsys_bar_t *ebp = &sa->mem_bar;
+
+ SFC_BAR_LOCK_DESTROY(ebp);
+ memset(ebp, 0, sizeof(*ebp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+/*
+ * A fixed RSS key which has the property of being symmetric
+ * (symmetrical flows are distributed to the same CPU)
+ * and is also known to give a uniform distribution
+ * (a good distribution of traffic between different CPUs)
+ */
+static const uint8_t default_rss_key[SFC_RSS_KEY_SIZE] = {
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+};
+#endif
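
The repeating 0x6d, 0x5a byte pair is the well-known symmetric Toeplitz key: with it the hash of (source, destination) equals the hash of (destination, source), so both directions of a connection are steered to the same Rx queue, which is what the comment above means by a symmetric key.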
+
+static int
+sfc_set_rss_defaults(struct sfc_adapter *sa)
+{
+#if EFSYS_OPT_RX_SCALE
+ int rc;
+
+ rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = efx_rx_scale_support_get(sa->nic, &sa->rss_support);
+ if (rc != 0)
+ goto fail_scale_support_get;
+
+ rc = efx_rx_hash_support_get(sa->nic, &sa->hash_support);
+ if (rc != 0)
+ goto fail_hash_support_get;
+
+ efx_rx_fini(sa->nic);
+ efx_ev_fini(sa->nic);
+ efx_intr_fini(sa->nic);
+
+ sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
+
+ rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
+
+ return 0;
+
+fail_hash_support_get:
+fail_scale_support_get:
+fail_rx_init:
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+int
+sfc_attach(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp;
+ efx_nic_t *enp = sa->nic;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ efx_mcdi_new_epoch(enp);
+
+ sfc_log_init(sa, "reset nic");
+ rc = efx_nic_reset(enp);
+ if (rc != 0)
+ goto fail_nic_reset;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
+ sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
+ if (!sa->tso)
+ sfc_warn(sa,
+ "TSO support isn't available on this adapter");
+ }
+
+ sfc_log_init(sa, "estimate resource limits");
+ rc = sfc_estimate_resource_limits(sa);
+ if (rc != 0)
+ goto fail_estimate_rsrc_limits;
+
+ sa->txq_max_entries = encp->enc_txq_max_ndescs;
+ SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
+
+ rc = sfc_intr_attach(sa);
+ if (rc != 0)
+ goto fail_intr_attach;
+
+ rc = sfc_ev_attach(sa);
+ if (rc != 0)
+ goto fail_ev_attach;
+
+ rc = sfc_port_attach(sa);
+ if (rc != 0)
+ goto fail_port_attach;
+
+ rc = sfc_set_rss_defaults(sa);
+ if (rc != 0)
+ goto fail_set_rss_defaults;
+
+ rc = sfc_filter_attach(sa);
+ if (rc != 0)
+ goto fail_filter_attach;
+
+ sfc_log_init(sa, "fini nic");
+ efx_nic_fini(enp);
+
+ sfc_flow_init(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_filter_attach:
+fail_set_rss_defaults:
+ sfc_port_detach(sa);
+
+fail_port_attach:
+ sfc_ev_detach(sa);
+
+fail_ev_attach:
+ sfc_intr_detach(sa);
+
+fail_intr_attach:
+ efx_nic_fini(sa->nic);
+
+fail_estimate_rsrc_limits:
+fail_nic_reset:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_flow_fini(sa);
+
+ sfc_filter_detach(sa);
+ sfc_port_detach(sa);
+ sfc_ev_detach(sa);
+ sfc_intr_detach(sa);
+
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
+
+int
+sfc_probe(struct sfc_adapter *sa)
+{
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ efx_nic_t *enp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sa->socket_id = rte_socket_id();
+
+ sfc_log_init(sa, "init mem bar");
+ rc = sfc_mem_bar_init(sa);
+ if (rc != 0)
+ goto fail_mem_bar_init;
+
+ sfc_log_init(sa, "get family");
+ rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
+ &sa->family);
+ if (rc != 0)
+ goto fail_family;
+ sfc_log_init(sa, "family is %u", sa->family);
+
+ sfc_log_init(sa, "create nic");
+ rte_spinlock_init(&sa->nic_lock);
+ rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
+ &sa->mem_bar, &sa->nic_lock, &enp);
+ if (rc != 0)
+ goto fail_nic_create;
+ sa->nic = enp;
+
+ rc = sfc_mcdi_init(sa);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ sfc_log_init(sa, "probe nic");
+ rc = efx_nic_probe(enp);
+ if (rc != 0)
+ goto fail_nic_probe;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_nic_probe:
+ sfc_mcdi_fini(sa);
+
+fail_mcdi_init:
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+fail_nic_create:
+fail_family:
+ sfc_mem_bar_fini(sa);
+
+fail_mem_bar_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_unprobe(struct sfc_adapter *sa)
+{
+ efx_nic_t *enp = sa->nic;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_log_init(sa, "unprobe nic");
+ efx_nic_unprobe(enp);
+
+ sfc_mcdi_fini(sa);
+
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+ sfc_mem_bar_fini(sa);
+
+ sfc_flow_fini(sa);
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc.h b/src/seastar/dpdk/drivers/net/sfc/sfc.h
new file mode 100644
index 00000000..fad0ce04
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc.h
@@ -0,0 +1,322 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_H
+#define _SFC_H
+
+#include <stdbool.h>
+
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+
+#include "efx.h"
+
+#include "sfc_filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
+#if EFSYS_OPT_RX_SCALE
+/** RSS key length (bytes) */
+#define SFC_RSS_KEY_SIZE 40
+/** RSS hash offloads mask */
+#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP)
+#endif
+
+/*
+ * +---------------+
+ * | UNINITIALIZED |<-----------+
+ * +---------------+ |
+ * |.eth_dev_init |.eth_dev_uninit
+ * V |
+ * +---------------+------------+
+ * | INITIALIZED |
+ * +---------------+<-----------<---------------+
+ * |.dev_configure | |
+ * V |failed |
+ * +---------------+------------+ |
+ * | CONFIGURING | |
+ * +---------------+----+ |
+ * |success | |
+ * | | +---------------+
+ * | | | CLOSING |
+ * | | +---------------+
+ * | | ^
+ * V |.dev_configure |
+ * +---------------+----+ |.dev_close
+ * | CONFIGURED |----------------------------+
+ * +---------------+<-----------+
+ * |.dev_start |
+ * V |
+ * +---------------+ |
+ * | STARTING |------------^
+ * +---------------+ failed |
+ * |success |
+ * | +---------------+
+ * | | STOPPING |
+ * | +---------------+
+ * | ^
+ * V |.dev_stop
+ * +---------------+------------+
+ * | STARTED |
+ * +---------------+
+ */
+enum sfc_adapter_state {
+ SFC_ADAPTER_UNINITIALIZED = 0,
+ SFC_ADAPTER_INITIALIZED,
+ SFC_ADAPTER_CONFIGURING,
+ SFC_ADAPTER_CONFIGURED,
+ SFC_ADAPTER_CLOSING,
+ SFC_ADAPTER_STARTING,
+ SFC_ADAPTER_STARTED,
+ SFC_ADAPTER_STOPPING,
+
+ SFC_ADAPTER_NSTATES
+};
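
For example, .dev_start moves a CONFIGURED adapter through STARTING to STARTED, and any failure on that path drops it back to CONFIGURED, exactly as implemented in sfc_start() in sfc.c above.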
+
+enum sfc_dev_filter_mode {
+ SFC_DEV_FILTER_MODE_PROMISC = 0,
+ SFC_DEV_FILTER_MODE_ALLMULTI,
+
+ SFC_DEV_FILTER_NMODES
+};
+
+enum sfc_mcdi_state {
+ SFC_MCDI_UNINITIALIZED = 0,
+ SFC_MCDI_INITIALIZED,
+ SFC_MCDI_BUSY,
+ SFC_MCDI_COMPLETED,
+
+ SFC_MCDI_NSTATES
+};
+
+struct sfc_mcdi {
+ rte_spinlock_t lock;
+ efsys_mem_t mem;
+ enum sfc_mcdi_state state;
+ efx_mcdi_transport_t transport;
+ bool logging;
+ uint32_t proxy_handle;
+ efx_rc_t proxy_result;
+};
+
+struct sfc_intr {
+ efx_intr_type_t type;
+ rte_intr_callback_fn handler;
+ boolean_t lsc_intr;
+};
+
+struct sfc_rxq_info;
+struct sfc_txq_info;
+struct sfc_dp_rx;
+
+struct sfc_port {
+ unsigned int lsc_seq;
+
+ uint32_t phy_adv_cap_mask;
+ uint32_t phy_adv_cap;
+
+ unsigned int flow_ctrl;
+ boolean_t flow_ctrl_autoneg;
+ size_t pdu;
+
+ boolean_t promisc;
+ boolean_t allmulti;
+
+ unsigned int max_mcast_addrs;
+ unsigned int nb_mcast_addrs;
+ uint8_t *mcast_addrs;
+
+ rte_spinlock_t mac_stats_lock;
+ uint64_t *mac_stats_buf;
+ efsys_mem_t mac_stats_dma_mem;
+ boolean_t mac_stats_reset_pending;
+ uint16_t mac_stats_update_period_ms;
+ uint32_t mac_stats_update_generation;
+ boolean_t mac_stats_periodic_dma_supported;
+ uint64_t mac_stats_last_request_timestamp;
+
+ uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES];
+};
+
+/* Adapter private data */
+struct sfc_adapter {
+ /*
+ * PMD setup and configuration is not thread safe. Since it is not
+ * performance sensitive, it is better to guarantee thread-safety
+	 * and add a device-level lock. Adapter control operations which
+ * change its state should acquire the lock.
+ */
+ rte_spinlock_t lock;
+ enum sfc_adapter_state state;
+ struct rte_eth_dev *eth_dev;
+ struct rte_kvargs *kvargs;
+ bool debug_init;
+ int socket_id;
+ efsys_bar_t mem_bar;
+ efx_family_t family;
+ efx_nic_t *nic;
+ rte_spinlock_t nic_lock;
+
+ struct sfc_mcdi mcdi;
+ struct sfc_intr intr;
+ struct sfc_port port;
+ struct sfc_filter filter;
+
+ unsigned int rxq_max;
+ unsigned int txq_max;
+
+ unsigned int txq_max_entries;
+
+ uint32_t evq_flags;
+ unsigned int evq_count;
+
+ unsigned int mgmt_evq_index;
+ rte_spinlock_t mgmt_evq_lock;
+ struct sfc_evq *mgmt_evq;
+
+ unsigned int rxq_count;
+ struct sfc_rxq_info *rxq_info;
+
+ unsigned int txq_count;
+ struct sfc_txq_info *txq_info;
+
+ boolean_t tso;
+
+ unsigned int rss_channels;
+
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_scale_support_t rss_support;
+ efx_rx_hash_support_t hash_support;
+ efx_rx_hash_type_t rss_hash_types;
+ unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
+ uint8_t rss_key[SFC_RSS_KEY_SIZE];
+#endif
+
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+};
+
+/*
+ * Add wrapper functions to acquire/release lock to be able to remove or
+ * change the lock in one place.
+ */
+
+static inline void
+sfc_adapter_lock_init(struct sfc_adapter *sa)
+{
+ rte_spinlock_init(&sa->lock);
+}
+
+static inline int
+sfc_adapter_is_locked(struct sfc_adapter *sa)
+{
+ return rte_spinlock_is_locked(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock(struct sfc_adapter *sa)
+{
+ rte_spinlock_lock(&sa->lock);
+}
+
+static inline int
+sfc_adapter_trylock(struct sfc_adapter *sa)
+{
+ return rte_spinlock_trylock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_unlock(struct sfc_adapter *sa)
+{
+ rte_spinlock_unlock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa)
+{
+ /* Just for symmetry of the API */
+}
+
+/** Get the number of milliseconds since boot from the default timer */
+static inline uint64_t
+sfc_get_system_msecs(void)
+{
+ return rte_get_timer_cycles() * MS_PER_S / rte_get_timer_hz();
+}
+
+int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp);
+void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
+
+int sfc_probe(struct sfc_adapter *sa);
+void sfc_unprobe(struct sfc_adapter *sa);
+int sfc_attach(struct sfc_adapter *sa);
+void sfc_detach(struct sfc_adapter *sa);
+int sfc_start(struct sfc_adapter *sa);
+void sfc_stop(struct sfc_adapter *sa);
+
+int sfc_mcdi_init(struct sfc_adapter *sa);
+void sfc_mcdi_fini(struct sfc_adapter *sa);
+
+int sfc_configure(struct sfc_adapter *sa);
+void sfc_close(struct sfc_adapter *sa);
+
+int sfc_intr_attach(struct sfc_adapter *sa);
+void sfc_intr_detach(struct sfc_adapter *sa);
+int sfc_intr_configure(struct sfc_adapter *sa);
+void sfc_intr_close(struct sfc_adapter *sa);
+int sfc_intr_start(struct sfc_adapter *sa);
+void sfc_intr_stop(struct sfc_adapter *sa);
+
+int sfc_port_attach(struct sfc_adapter *sa);
+void sfc_port_detach(struct sfc_adapter *sa);
+int sfc_port_configure(struct sfc_adapter *sa);
+void sfc_port_close(struct sfc_adapter *sa);
+int sfc_port_start(struct sfc_adapter *sa);
+void sfc_port_stop(struct sfc_adapter *sa);
+void sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info);
+int sfc_port_update_mac_stats(struct sfc_adapter *sa);
+int sfc_port_reset_mac_stats(struct sfc_adapter *sa);
+int sfc_set_rx_mode(struct sfc_adapter *sa);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_debug.h b/src/seastar/dpdk/drivers/net/sfc/sfc_debug.h
new file mode 100644
index 00000000..c0b48677
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_debug.h
@@ -0,0 +1,59 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DEBUG_H_
+#define _SFC_DEBUG_H_
+
+#include <rte_debug.h>
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+/* Avoid dependency on RTE_LOG_LEVEL to be able to enable debug checks
+ * in the driver only.
+ */
+#define SFC_ASSERT(exp) RTE_VERIFY(exp)
+#else
+/* If the driver debug is not enabled, follow DPDK debug/non-debug */
+#define SFC_ASSERT(exp) RTE_ASSERT(exp)
+#endif
+
+/* Log a fatal PMD message (prefix and \n are added automatically) and abort */
+#define sfc_panic(sa, fmt, args...) \
+ do { \
+ const struct rte_eth_dev *_dev = (sa)->eth_dev; \
+ const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ \
+ rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
+ _pci_dev->addr.domain, _pci_dev->addr.bus, \
+ _pci_dev->addr.devid, _pci_dev->addr.function,\
+ _dev->data->port_id, ##args); \
+ } while (0)
+
+#endif /* _SFC_DEBUG_H_ */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_dp.c b/src/seastar/dpdk/drivers/net/sfc/sfc_dp.c
new file mode 100644
index 00000000..860aa921
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_dp.c
@@ -0,0 +1,100 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_log.h>
+
+#include "sfc_dp.h"
+
+void
+sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr)
+{
+ dpq->port_id = port_id;
+ dpq->queue_id = queue_id;
+ dpq->pci_addr = *pci_addr;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_name(struct sfc_dp_list *head, enum sfc_dp_type type,
+ const char *name)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ if (strcmp(entry->name, name) == 0)
+ return entry;
+ }
+
+ return NULL;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_caps(struct sfc_dp_list *head, enum sfc_dp_type type,
+ unsigned int avail_caps)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ /* Take the first matching */
+ if (sfc_dp_match_hw_fw_caps(entry, avail_caps))
+ return entry;
+ }
+
+ return NULL;
+}
+
+int
+sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
+{
+ if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
+			"sfc %s datapath '%s' already registered\n",
+ entry->type == SFC_DP_RX ? "Rx" :
+ entry->type == SFC_DP_TX ? "Tx" :
+ "unknown",
+ entry->name);
+ return EEXIST;
+ }
+
+ TAILQ_INSERT_TAIL(head, entry, links);
+
+ return 0;
+}
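
A hypothetical registration sketch follows; the list head, variant name and capability mask are illustrative only (the real list head is maintained elsewhere in the driver).

static struct sfc_dp_list example_dp_head =
	TAILQ_HEAD_INITIALIZER(example_dp_head);

static struct sfc_dp example_dp = {
	.name		= "example_rx",
	.type		= SFC_DP_RX,
	.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
};

static void
example_dp_register(void)
{
	/* Returns EEXIST if a variant with the same name and type exists */
	if (sfc_dp_register(&example_dp_head, &example_dp) != 0)
		return;

	/* Later: pick a variant matching the available capabilities */
	(void)sfc_dp_find_by_caps(&example_dp_head, SFC_DP_RX,
				  SFC_DP_HW_FW_CAP_EF10);
}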
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_dp.h b/src/seastar/dpdk/drivers/net/sfc/sfc_dp.h
new file mode 100644
index 00000000..eff0aa87
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_dp.h
@@ -0,0 +1,125 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_H
+#define _SFC_DP_H
+
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_pci.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_DIV_ROUND_UP(a, b) \
+ __extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ \
+ (_a + (_b - 1)) / _b; \
+ })
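
For instance, SFC_DIV_ROUND_UP(1000, 256) evaluates to 4; the statement-expression form ensures both arguments are evaluated exactly once.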
+
+/**
+ * Datapath exception handler to be provided by the control path.
+ */
+typedef void (sfc_dp_exception_t)(void *ctrl);
+
+enum sfc_dp_type {
+ SFC_DP_RX = 0, /**< Receive datapath */
+ SFC_DP_TX, /**< Transmit datapath */
+};
+
+
+/** Datapath queue run-time information */
+struct sfc_dp_queue {
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct rte_pci_addr pci_addr;
+};
+
+void sfc_dp_queue_init(struct sfc_dp_queue *dpq,
+ uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr);
+
+/*
+ * Helper macro to define datapath logging macros and have uniform
+ * logging.
+ */
+#define SFC_DP_LOG(dp_name, level, dpq, ...) \
+ do { \
+ const struct sfc_dp_queue *_dpq = (dpq); \
+ const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \
+ \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("%s " PCI_PRI_FMT \
+ " #%" PRIu16 ".%" PRIu16 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ dp_name, \
+ _addr->domain, _addr->bus, \
+ _addr->devid, _addr->function, \
+ _dpq->port_id, _dpq->queue_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
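
As an illustration of the intended use (the datapath name below is hypothetical), a specific datapath would typically define its own shorthand on top of SFC_DP_LOG:

#define sfc_example_dp_err(dpq, ...) \
	SFC_DP_LOG("example", ERR, dpq, __VA_ARGS__)

#define sfc_example_dp_info(dpq, ...) \
	SFC_DP_LOG("example", INFO, dpq, __VA_ARGS__)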
+
+
+/** Datapath definition */
+struct sfc_dp {
+ TAILQ_ENTRY(sfc_dp) links;
+ const char *name;
+ enum sfc_dp_type type;
+ /* Mask of required hardware/firmware capabilities */
+ unsigned int hw_fw_caps;
+#define SFC_DP_HW_FW_CAP_EF10 0x1
+};
+
+/** List of datapath variants */
+TAILQ_HEAD(sfc_dp_list, sfc_dp);
+
+/* Check if available HW/FW capabilities are sufficient for the datapath */
+static inline bool
+sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps)
+{
+ return (dp->hw_fw_caps & avail_caps) == dp->hw_fw_caps;
+}
+
+struct sfc_dp *sfc_dp_find_by_name(struct sfc_dp_list *head,
+ enum sfc_dp_type type, const char *name);
+struct sfc_dp *sfc_dp_find_by_caps(struct sfc_dp_list *head,
+ enum sfc_dp_type type,
+ unsigned int avail_caps);
+int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_dp_rx.h b/src/seastar/dpdk/drivers/net/sfc/sfc_dp_rx.h
new file mode 100644
index 00000000..9d05a4b3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_dp_rx.h
@@ -0,0 +1,197 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_RX_H
+#define _SFC_DP_RX_H
+
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic receive queue information used on data path.
+ * It must be kept as small as it is possible since it is built into
+ * the structure used on datapath.
+ */
+struct sfc_dp_rxq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath receive queue creation information.
+ *
+ * The structure is used just to pass information from control path to
+ * datapath. It could be just function arguments, but it would be hardly
+ * readable.
+ */
+struct sfc_dp_rx_qcreate_info {
+ /** Memory pool to allocate Rx buffer from */
+ struct rte_mempool *refill_mb_pool;
+ /** Minimum number of unused Rx descriptors to do refill */
+ unsigned int refill_threshold;
+ /**
+ * Usable mbuf data space in accordance with alignment and
+ * padding requirements imposed by HW.
+ */
+ unsigned int buf_size;
+
+ /**
+ * Maximum number of Rx descriptors completed in one Rx event.
+	 * Just for sanity checks, if the datapath would like to do them.
+ */
+ unsigned int batch_max;
+
+ /** Pseudo-header size */
+ unsigned int prefix_size;
+
+ /** Receive queue flags initializer */
+ unsigned int flags;
+#define SFC_RXQ_FLAG_RSS_HASH 0x1
+
+ /** Rx queue size */
+ unsigned int rxq_entries;
+ /** DMA-mapped Rx descriptors ring */
+ void *rxq_hw_ring;
+
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /**
+ * Virtual address of the memory-mapped BAR to push Rx refill
+ * doorbell
+ */
+ volatile void *mem_bar;
+};
+
+/**
+ * Allocate and initialize datapath receive queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Receive queue information
+ * @param dp_rxqp Location for generic datapath receive queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp);
+
+/**
+ * Free resources allocated for a datapath receive queue.
+ */
+typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue start callback.
+ *
+ * It hands over the EvQ to the datapath.
+ */
+typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int evq_read_ptr);
+
+/**
+ * Receive queue stop function called before flush.
+ */
+typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
+
+/**
+ * Receive queue purge function called after queue flush.
+ *
+ * Should be used to free unused receive buffers.
+ */
+typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Get packet types recognized/classified */
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+
+/** Get number of pending Rx descriptors */
+typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Receive datapath definition */
+struct sfc_dp_rx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_RX_FEAT_SCATTER 0x1
+ sfc_dp_rx_qcreate_t *qcreate;
+ sfc_dp_rx_qdestroy_t *qdestroy;
+ sfc_dp_rx_qstart_t *qstart;
+ sfc_dp_rx_qstop_t *qstop;
+ sfc_dp_rx_qrx_ev_t *qrx_ev;
+ sfc_dp_rx_qpurge_t *qpurge;
+ sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
+ sfc_dp_rx_qdesc_npending_t *qdesc_npending;
+ eth_rx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+extern struct sfc_dp_rx sfc_efx_rx;
+extern struct sfc_dp_rx sfc_ef10_rx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_RX_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_dp_tx.h b/src/seastar/dpdk/drivers/net/sfc/sfc_dp_tx.h
new file mode 100644
index 00000000..2bb9a2e7
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_dp_tx.h
@@ -0,0 +1,170 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_TX_H
+#define _SFC_DP_TX_H
+
+#include <rte_ethdev.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic transmit queue information used on the data path.
+ * It must be kept as small as possible since it is built into
+ * the structure used on the data path.
+ */
+struct sfc_dp_txq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath transmit queue creation information.
+ *
+ * The structure is used just to pass information from the control path
+ * to the datapath. It could be just function arguments, but that would
+ * hardly be readable.
+ */
+struct sfc_dp_tx_qcreate_info {
+ /** Minimum number of unused Tx descriptors to do reap */
+ unsigned int free_thresh;
+ /** Transmit queue configuration flags */
+ unsigned int flags;
+ /** Tx queue size */
+ unsigned int txq_entries;
+ /** Maximum size of data in the DMA descriptor */
+ uint16_t dma_desc_size_max;
+ /** DMA-mapped Tx descriptors ring */
+ void *txq_hw_ring;
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /** Virtual address of the memory-mapped BAR to push Tx doorbell */
+ volatile void *mem_bar;
+};
+
+/**
+ * Allocate and initialize datapath transmit queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Tx queue details wrapped in structure
+ * @param dp_txqp Location for generic datapath transmit queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp);
+
+/**
+ * Free resources allocated for a datapath transmit queue.
+ */
+typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue start callback.
+ *
+ * It hands over the EvQ to the datapath.
+ */
+typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int evq_read_ptr,
+ unsigned int txq_desc_index);
+
+/**
+ * Transmit queue stop function called before the queue flush.
+ *
+ * It returns the EvQ to the control path.
+ */
+typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Transmit event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);
+
+/**
+ * Transmit queue function called after the queue flush.
+ */
+typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);
+
+/** Transmit datapath definition */
+struct sfc_dp_tx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1
+#define SFC_DP_TX_FEAT_TSO 0x2
+#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
+ sfc_dp_tx_qcreate_t *qcreate;
+ sfc_dp_tx_qdestroy_t *qdestroy;
+ sfc_dp_tx_qstart_t *qstart;
+ sfc_dp_tx_qstop_t *qstop;
+ sfc_dp_tx_qtx_ev_t *qtx_ev;
+ sfc_dp_tx_qreap_t *qreap;
+ eth_tx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+extern struct sfc_dp_tx sfc_efx_tx;
+extern struct sfc_dp_tx sfc_ef10_tx;
+extern struct sfc_dp_tx sfc_ef10_simple_tx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_TX_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_ef10.h b/src/seastar/dpdk/drivers/net/sfc/sfc_ef10.h
new file mode 100644
index 00000000..060d8fef
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_ef10.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_EF10_H
+#define _SFC_EF10_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of events in one cache line */
+#define SFC_EF10_EV_PER_CACHE_LINE \
+ (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+#define SFC_EF10_EV_QCLEAR_MASK (~(SFC_EF10_EV_PER_CACHE_LINE - 1))
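+
+/*
+ * For example, assuming 64-byte cache lines and 8-byte efx_qword_t events,
+ * SFC_EF10_EV_PER_CACHE_LINE is 8, so the mask rounds a read pointer down
+ * to a cache line boundary and used event entries are cleared 8 at a time.
+ */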
+
+#if defined(SFC_EF10_EV_QCLEAR_USE_EFX)
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ efx_qword_t *entry = ptr;
+ unsigned int i;
+
+ for (i = 0; i < SFC_EF10_EV_PER_CACHE_LINE; ++i)
+ EFX_SET_QWORD(entry[i]);
+}
+#else
+/*
+ * It is possible to do this using AVX2 or AVX-512F, but it shows worse
+ * performance.
+ */
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ const __m128i val = _mm_set1_epi64x(UINT64_MAX);
+ __m128i *addr = ptr;
+ unsigned int i;
+
+ RTE_BUILD_BUG_ON(sizeof(val) > RTE_CACHE_LINE_SIZE);
+ RTE_BUILD_BUG_ON(RTE_CACHE_LINE_SIZE % sizeof(val) != 0);
+
+ for (i = 0; i < RTE_CACHE_LINE_SIZE / sizeof(val); ++i)
+ _mm_store_si128(&addr[i], val);
+}
+#endif
+
+static inline void
+sfc_ef10_ev_qclear(efx_qword_t *hw_ring, unsigned int ptr_mask,
+ unsigned int old_read_ptr, unsigned int read_ptr)
+{
+ const unsigned int clear_ptr = read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+ unsigned int old_clear_ptr = old_read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+
+ while (old_clear_ptr != clear_ptr) {
+ sfc_ef10_ev_qclear_cache_line(
+ &hw_ring[old_clear_ptr & ptr_mask]);
+ old_clear_ptr += SFC_EF10_EV_PER_CACHE_LINE;
+ }
+
+ /*
+ * No barriers here.
+ * Functions which push the doorbell must take care of the correct
+ * ordering: store instructions which fill in the EvQ ring must be
+ * retired from the CPU and synced for DMA before the doorbell write
+ * which allows the NIC to use these event entries.
+ */
+}
+
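+/*
+ * A cleared event entry is all-ones (see sfc_ef10_ev_qclear_cache_line),
+ * so an event is present if and only if at least one bit in either dword
+ * is zero, i.e. the bitwise NOT of either dword is non-zero.
+ */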
+static inline bool
+sfc_ef10_ev_present(const efx_qword_t ev)
+{
+ return ~EFX_QWORD_FIELD(ev, EFX_DWORD_0) |
+ ~EFX_QWORD_FIELD(ev, EFX_DWORD_1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EF10_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_ef10_rx.c b/src/seastar/dpdk/drivers/net/sfc/sfc_ef10_rx.c
new file mode 100644
index 00000000..1484baba
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_ef10_rx.c
@@ -0,0 +1,712 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* EF10 native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define sfc_ef10_rx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/**
+ * Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary.
+ */
+#define SFC_EF10_RX_WPTR_ALIGN 8
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that the corresponding event queue never overfills.
+ * The EF10 native datapath uses an event queue of the same size as the Rx
+ * queue. The maximum number of events on the datapath can be estimated as
+ * the number of Rx queue entries (one event per Rx buffer in the worst
+ * case) plus Rx error and flush events.
+ */
+#define SFC_EF10_RXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
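+
+/*
+ * For example, with 64-byte cache lines (8 events per line) a 512-entry
+ * Rx ring may hold at most 512 - 1 - 7 - 1 - 1 = 502 posted buffers.
+ */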
+
+struct sfc_ef10_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_rxq {
+ /* Used on data path */
+ unsigned int flags;
+#define SFC_EF10_RXQ_STARTED 0x1
+#define SFC_EF10_RXQ_NOT_RUNNING 0x2
+#define SFC_EF10_RXQ_EXCEPTION 0x4
+#define SFC_EF10_RXQ_RSS_HASH 0x8
+ unsigned int ptr_mask;
+ unsigned int prepared;
+ unsigned int completed;
+ unsigned int evq_read_ptr;
+ efx_qword_t *evq_hw_ring;
+ struct sfc_ef10_rx_sw_desc *sw_ring;
+ uint64_t rearm_data;
+ uint16_t prefix_size;
+
+ /* Used on refill */
+ uint16_t buf_size;
+ unsigned int added;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ efx_qword_t *rxq_hw_ring;
+ volatile void *doorbell;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_ef10_rxq *
+sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
+}
+
+static void
+sfc_ef10_rx_qpush(struct sfc_ef10_rxq *rxq)
+{
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+ SFC_ASSERT(RTE_ALIGN(rxq->added, SFC_EF10_RX_WPTR_ALIGN) == rxq->added);
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR,
+ rxq->added & rxq->ptr_mask);
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_write32() has rte_io_wmb() which guarantees that the STORE
+ * operations (i.e. Rx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to the NIC before the STORE
+ * operations that follow it (i.e. the doorbell write).
+ */
+ rte_write32(dword.ed_u32[0], rxq->doorbell);
+}
+
+static void
+sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ const uint32_t buf_size = rxq->buf_size;
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ unsigned int added = rxq->added;
+
+ free_space = SFC_EF10_RXQ_LIMIT(ptr_mask + 1) -
+ (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ do {
+ unsigned int id;
+ unsigned int i;
+
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ struct rte_eth_dev_data *dev_data =
+ rte_eth_devices[rxq->dp.dpq.port_id].data;
+
+ /*
+ * It is hardly a safe way to increment the counter
+ * from different contexts, but all PMDs do it.
+ */
+ dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0, id = added & ptr_mask;
+ i < RTE_DIM(objs);
+ ++i, ++id) {
+ struct rte_mbuf *m = objs[i];
+ struct sfc_ef10_rx_sw_desc *rxd;
+ phys_addr_t phys_addr;
+
+ SFC_ASSERT((id & ~ptr_mask) == 0);
+ rxd = &rxq->sw_ring[id];
+ rxd->mbuf = m;
+
+ /*
+ * Avoid writing to the mbuf here. It is cheaper to do it
+ * when we receive the packet and fill in nearby
+ * structure members.
+ */
+
+ phys_addr = rte_mbuf_data_dma_addr_default(m);
+ EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+ ESF_DZ_RX_KER_BYTE_CNT, buf_size,
+ ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
+ }
+
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(rxq->added != added);
+ rxq->added = added;
+ sfc_ef10_rx_qpush(rxq);
+}
+
+static void
+sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
+{
+ struct rte_mbuf *next_mbuf;
+
+ /* Prefetch next bunch of software descriptors */
+ if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
+ rte_prefetch0(&rxq->sw_ring[next_id]);
+
+ /*
+ * It looks strange to prefetch depending on previously prefetched
+ * data, but measurements show that it is really efficient and
+ * increases packet rate.
+ */
+ next_mbuf = rxq->sw_ring[next_id].mbuf;
+ if (likely(next_mbuf != NULL)) {
+ /* Prefetch the next mbuf structure */
+ rte_mbuf_prefetch_part1(next_mbuf);
+
+ /* Prefetch the pseudo header of the next packet */
+ /* data_off is not filled in yet */
+ /* The data may not have arrived yet, but we prefetch optimistically */
+ rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ }
+}
+
+static uint16_t
+sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->prepared);
+ unsigned int completed = rxq->completed;
+ unsigned int i;
+
+ rxq->prepared -= n_rx_pkts;
+ rxq->completed = completed + n_rx_pkts;
+
+ for (i = 0; i < n_rx_pkts; ++i, ++completed)
+ rx_pkts[i] = rxq->sw_ring[completed & rxq->ptr_mask].mbuf;
+
+ return n_rx_pkts;
+}
+
+static void
+sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
+ struct rte_mbuf *m)
+{
+ uint32_t l2_ptype = 0;
+ uint32_t l3_ptype = 0;
+ uint32_t l4_ptype = 0;
+ uint64_t ol_flags = 0;
+
+ if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
+ goto done;
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
+ case ESE_DZ_ETH_TAG_CLASS_NONE:
+ l2_ptype = RTE_PTYPE_L2_ETHER;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN1:
+ l2_ptype = RTE_PTYPE_L2_ETHER_VLAN;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN2:
+ l2_ptype = RTE_PTYPE_L2_ETHER_QINQ;
+ break;
+ default:
+ /* Unexpected Eth tag class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ l4_ptype = RTE_PTYPE_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP4:
+ l3_ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH |
+ ((EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_IPCKSUM_ERR_LBN)) ?
+ PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ break;
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ l4_ptype |= RTE_PTYPE_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP6:
+ l3_ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH;
+ break;
+ case ESE_DZ_L3_CLASS_ARP:
+ /* Override Layer 2 packet type */
+ l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ default:
+ /* Unexpected Layer 3 class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
+ case ESE_DZ_L4_CLASS_TCP:
+ l4_ptype = RTE_PTYPE_L4_TCP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_DZ_L4_CLASS_UDP:
+ l4_ptype = RTE_PTYPE_L4_UDP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_DZ_L4_CLASS_UNKNOWN:
+ break;
+ default:
+ /* Unexpected Layer 4 class */
+ SFC_ASSERT(false);
+ }
+
+ /* Remove RSS hash offload flag if RSS is not enabled */
+ if (~rxq->flags & SFC_EF10_RXQ_RSS_HASH)
+ ol_flags &= ~PKT_RX_RSS_HASH;
+
+done:
+ m->ol_flags = ol_flags;
+ m->packet_type = l2_ptype | l3_ptype | l4_ptype;
+}
+
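+/*
+ * Accessors for the Rx prefix (pseudo header) as consumed by this datapath:
+ * the RSS hash is read from the first 32-bit word and the 16-bit packet
+ * length from byte offset 8, both little-endian.
+ */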
+static uint16_t
+sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
+}
+
+static uint32_t
+sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
+}
+
+static uint16_t
+sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ unsigned int completed = rxq->completed;
+ unsigned int ready;
+ struct sfc_ef10_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ struct rte_mbuf *m0;
+ uint16_t n_rx_pkts;
+ const uint8_t *pseudo_hdr;
+ uint16_t pkt_len;
+
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - completed) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ SFC_ASSERT(ready > 0);
+
+ if (rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
+ SFC_ASSERT(rxq->prepared == 0);
+ rxq->completed += ready;
+ while (ready-- > 0) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ }
+ return 0;
+ }
+
+ n_rx_pkts = RTE_MIN(ready, nb_pkts);
+ rxq->prepared = ready - n_rx_pkts;
+ rxq->completed += n_rx_pkts;
+
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Classify packet based on Rx event */
+ sfc_ef10_rx_ev_to_offloads(rxq, rx_ev, m);
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get the RSS hash from the pseudo header to avoid
+ * a condition/branch. Whether it is valid depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ if (ready == 1)
+ pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
+ rxq->prefix_size;
+ else
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+
+ /* Remember mbuf to copy offload flags and packet type from */
+ m0 = m;
+ for (--ready; ready > 0; --ready) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ if (ready > rxq->prepared)
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
+ sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Event-dependent information is the same */
+ m->ol_flags = m0->ol_flags;
+ m->packet_type = m0->packet_type;
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get the RSS hash from the pseudo header to avoid
+ * a condition/branch. Whether it is valid depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+ }
+
+ return n_rx_pkts;
+}
+
+static bool
+sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
+{
+ *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*rx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_RX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
+ sfc_ef10_rx_err(&rxq->dp.dpq,
+ "RxQ exception at EvQ read ptr %#x",
+ rxq->evq_read_ptr);
+ return false;
+ }
+
+ rxq->evq_read_ptr++;
+ return true;
+}
+
+static uint16_t
+sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
+ unsigned int evq_old_read_ptr;
+ uint16_t n_rx_pkts;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags &
+ (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
+ return 0;
+
+ n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+
+ evq_old_read_ptr = rxq->evq_read_ptr;
+ while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ n_rx_pkts += sfc_ef10_rx_process_event(rxq, rx_ev,
+ rx_pkts + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
+ rxq->evq_read_ptr);
+
+ /* It is not a problem if we refill in the case of an exception */
+ sfc_ef10_rx_qrefill(rxq);
+
+ return n_rx_pkts;
+}
+
+static const uint32_t *
+sfc_ef10_supported_ptypes_get(void)
+{
+ static const uint32_t ef10_native_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ef10_native_ptypes;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+{
+ /*
+ * A correct implementation requires EvQ polling and event
+ * processing (keeping all ready mbufs in prepared).
+ */
+ return -ENOTSUP;
+}
+
+
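+/*
+ * Build a template for the 64-bit mbuf rearm_data area (which covers
+ * data_off, refcnt, nb_segs and port in this mbuf layout) so that the
+ * receive path can re-initialize all of these fields with a single
+ * store per packet.
+ */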
+static uint64_t
+sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
+{
+ struct rte_mbuf m;
+
+ memset(&m, 0, sizeof(m));
+
+ rte_mbuf_refcnt_set(&m, 1);
+ m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
+ m.nb_segs = 1;
+ m.port = port_id;
+
+ /* rearm_data covers structure members filled in above */
+ rte_compiler_barrier();
+ RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
+ return m.rearm_data[0];
+}
+
+static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
+static int
+sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_ef10_rxq *rxq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->rxq_entries != info->evq_entries)
+ goto fail_rxq_args;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+ info->rxq_entries,
+ sizeof(*rxq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL)
+ goto fail_desc_alloc;
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->rearm_data =
+ sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
+ rxq->prefix_size = info->prefix_size;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+ rxq->rxq_hw_ring = info->rxq_hw_ring;
+ rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_RX_DESC_UPD_REG_OFST +
+ info->hw_index * ER_DZ_RX_DESC_UPD_REG_STEP;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+fail_rxq_args:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
+static void
+sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
+static int
+sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->prepared = 0;
+ rxq->completed = rxq->added = 0;
+
+ sfc_ef10_rx_qrefill(rxq);
+
+ rxq->evq_read_ptr = evq_read_ptr;
+
+ rxq->flags |= SFC_EF10_RXQ_STARTED;
+ rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
+static void
+sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+
+ *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
+static bool
+sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Rx event since we free all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
+static void
+sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_ef10_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_ring[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ }
+
+ rxq->flags &= ~SFC_EF10_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = 0,
+ .qcreate = sfc_ef10_rx_qcreate,
+ .qdestroy = sfc_ef10_rx_qdestroy,
+ .qstart = sfc_ef10_rx_qstart,
+ .qstop = sfc_ef10_rx_qstop,
+ .qrx_ev = sfc_ef10_rx_qrx_ev,
+ .qpurge = sfc_ef10_rx_qpurge,
+ .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
+ .qdesc_npending = sfc_ef10_rx_qdesc_npending,
+ .pkt_burst = sfc_ef10_recv_pkts,
+};
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_ef10_tx.c b/src/seastar/dpdk/drivers/net/sfc/sfc_ef10_tx.c
new file mode 100644
index 00000000..bac9baa9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_ef10_tx.c
@@ -0,0 +1,560 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_dp_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define sfc_ef10_tx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/** Maximum length of the DMA descriptor data */
+#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
+ ((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
+
+/**
+ * Maximum number of descriptors/buffers in the Tx ring.
+ * It should guarantee that the corresponding event queue never overfills.
+ * The EF10 native datapath uses an event queue of the same size as the Tx
+ * queue. The maximum number of events on the datapath can be estimated as
+ * the number of Tx queue entries (one event per Tx buffer in the worst
+ * case) plus Tx error and flush events.
+ */
+#define SFC_EF10_TXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Tx error */ - 1 /* flush */)
+
+struct sfc_ef10_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_txq {
+ unsigned int flags;
+#define SFC_EF10_TXQ_STARTED 0x1
+#define SFC_EF10_TXQ_NOT_RUNNING 0x2
+#define SFC_EF10_TXQ_EXCEPTION 0x4
+
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int completed;
+ unsigned int free_thresh;
+ unsigned int evq_read_ptr;
+ struct sfc_ef10_tx_sw_desc *sw_ring;
+ efx_qword_t *txq_hw_ring;
+ volatile void *doorbell;
+ efx_qword_t *evq_hw_ring;
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_ef10_txq *
+sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_ef10_txq, dp);
+}
+
+static bool
+sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
+{
+ volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
+
+ /*
+ * The exception flag is set while reap is done.
+ * Reap is never done twice per packet burst get, and absence of
+ * the flag is checked on burst get entry.
+ */
+ SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);
+
+ *tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*tx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_TX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ txq->flags |= SFC_EF10_TXQ_EXCEPTION;
+ sfc_ef10_tx_err(&txq->dp.dpq,
+ "TxQ exception at EvQ read ptr %#x",
+ txq->evq_read_ptr);
+ return false;
+ }
+
+ txq->evq_read_ptr++;
+ return true;
+}
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+ const unsigned int curr_done = pending - 1;
+ unsigned int anew_done = curr_done;
+ efx_qword_t tx_ev;
+
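+ /*
+ * Each Tx event reports the index of the latest completed descriptor;
+ * curr_done is the last descriptor already known to be done, so the
+ * masked difference below gives the number of descriptors completed
+ * since the previous reap.
+ */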
+ while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
+ /*
+ * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ /* Update the latest done descriptor */
+ anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
+ }
+ pending += (anew_done - curr_done) & ptr_mask;
+
+ if (pending != completed) {
+ do {
+ struct sfc_ef10_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & ptr_mask];
+
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ } while (++completed != pending);
+
+ txq->completed = completed;
+ }
+
+ sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+ txq->evq_read_ptr);
+}
+
+static void
+sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+ efx_qword_t *edp)
+{
+ EFX_POPULATE_QWORD_4(*edp,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, !eop,
+ ESF_DZ_TX_KER_BYTE_CNT, size,
+ ESF_DZ_TX_KER_BUF_ADDR, addr);
+}
+
+static inline void
+sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
+ unsigned int pushed)
+{
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ /*
+ * This improves performance by pushing a TX descriptor at the same
+ * time as the doorbell. The descriptor must be added to the TXQ,
+ * so that it can be used if the hardware decides not to use the pushed
+ * descriptor.
+ */
+ desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_io_wmb() guarantees that the STORE operations
+ * (i.e. Tx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to the NIC before the STORE
+ * operations that follow it (i.e. the doorbell write).
+ */
+ rte_io_wmb();
+
+ *(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
+}
+
+static unsigned int
+sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
+{
+ unsigned int extra_descs_per_seg;
+ unsigned int extra_descs_per_pkt;
+
+ /*
+ * VLAN offload is not supported yet, so no extra descriptors
+ * are required for a VLAN option descriptor.
+ */
+
+/** Maximum length of the mbuf segment data */
+#define SFC_MBUF_SEG_LEN_MAX UINT16_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
+
+ /*
+ * Each segment is already counted once below. So, calculate
+ * how many extra DMA descriptors may be required per segment in
+ * the worst case because of the maximum DMA descriptor length limit.
+ * If the maximum segment length is less than or equal to the maximum
+ * DMA descriptor length, no extra DMA descriptors are required.
+ */
+ extra_descs_per_seg =
+ (SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
+/** Maximum length of the packet */
+#define SFC_MBUF_PKT_LEN_MAX UINT32_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);
+
+ /*
+ * One more limit on the maximum number of extra DMA descriptors
+ * comes from slicing the entire packet because of the DMA descriptor
+ * length limit, taking into account that there is at least one
+ * segment which is already counted below (hence the division of the
+ * maximum packet length minus one, rounded down).
+ * TSO is not supported yet, so the packet length is limited by the
+ * maximum PDU size.
+ */
+ extra_descs_per_pkt =
+ (RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
+ SFC_MBUF_PKT_LEN_MAX) - 1) /
+ SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
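+ /*
+ * For example, assuming a 14-bit Tx descriptor byte count field
+ * (SFC_EF10_TX_DMA_DESC_LEN_MAX == 16383), a single 65535-byte mbuf
+ * segment may need up to 4 extra descriptors in addition to the one
+ * counted per segment below.
+ */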
+ return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
+ extra_descs_per_pkt);
+}
+
+static uint16_t
+sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ ptr_mask = txq->ptr_mask;
+ added = txq->added;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+
+ reap_done = (dma_desc_space < txq->free_thresh);
+ if (reap_done) {
+ sfc_ef10_tx_reap(txq);
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ }
+
+ for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
+ pktp != pktp_end;
+ ++pktp) {
+ struct rte_mbuf *m_seg = *pktp;
+ unsigned int pkt_start = added;
+ uint32_t pkt_len;
+
+ if (likely(pktp + 1 != pktp_end))
+ rte_mbuf_prefetch_part1(pktp[1]);
+
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
+ if (reap_done)
+ break;
+
+ /* Push already prepared descriptors before polling */
+ if (added != txq->added) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+ sfc_ef10_tx_reap(txq);
+ reap_done = true;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
+ break;
+ }
+
+ pkt_len = m_seg->pkt_len;
+ do {
+ phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+ unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+
+ SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ pkt_len -= seg_len;
+
+ sfc_ef10_tx_qdesc_dma_create(seg_addr,
+ seg_len, (pkt_len == 0),
+ &txq->txq_hw_ring[added & ptr_mask]);
+ ++added;
+
+ } while ((m_seg = m_seg->next) != 0);
+
+ dma_desc_space -= (added - pkt_start);
+
+ /* Assign mbuf to the last used desc */
+ txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+static uint16_t
+sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ ptr_mask = txq->ptr_mask;
+ added = txq->added;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+
+ reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
+ if (reap_done) {
+ sfc_ef10_tx_reap(txq);
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ }
+
+ pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
+ for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
+ struct rte_mbuf *pkt = *pktp;
+ unsigned int id = added & ptr_mask;
+
+ SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
+ SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+ rte_pktmbuf_data_len(pkt),
+ true, &txq->txq_hw_ring[id]);
+
+ txq->sw_ring[id].mbuf = pkt;
+
+ ++added;
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+
+static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
+static int
+sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_ef10_txq *txq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->txq_entries != info->evq_entries)
+ goto fail_bad_args;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
+ txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->free_thresh = info->free_thresh;
+ txq->txq_hw_ring = info->txq_hw_ring;
+ txq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_TX_DESC_UPD_REG_OFST +
+ info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
+ txq->evq_hw_ring = info->evq_hw_ring;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_sw_ring_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+fail_bad_args:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
+static void
+sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
+static int
+sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->evq_read_ptr = evq_read_ptr;
+ txq->added = txq->completed = txq_desc_index;
+
+ txq->flags |= SFC_EF10_TXQ_STARTED;
+ txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
+static void
+sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
+
+ *evq_read_ptr = txq->evq_read_ptr;
+}
+
+static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
+static bool
+sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Tx event since we reap all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
+static void
+sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ for (txds = 0; txds <= txq->ptr_mask; ++txds) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EF10_TXQ_STARTED;
+}
+
+struct sfc_dp_tx sfc_ef10_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = SFC_DP_TX_FEAT_MULTI_SEG,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .pkt_burst = sfc_ef10_xmit_pkts,
+};
+
+struct sfc_dp_tx sfc_ef10_simple_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
+ .type = SFC_DP_TX,
+ },
+ .features = 0,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .pkt_burst = sfc_ef10_simple_xmit_pkts,
+};
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_ethdev.c b/src/seastar/dpdk/drivers/net/sfc/sfc_ethdev.c
new file mode 100644
index 00000000..4c9335f3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_ethdev.c
@@ -0,0 +1,1642 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_pci.h>
+#include <rte_errno.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_flow.h"
+#include "sfc_dp.h"
+#include "sfc_dp_rx.h"
+
+static struct sfc_dp_list sfc_dp_head =
+ TAILQ_HEAD_INITIALIZER(sfc_dp_head);
+
+static int
+sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ efx_nic_fw_info_t enfi;
+ int ret;
+ int rc;
+
+ /*
+ * The return value of the callback is likely supposed to be greater
+ * than or equal to 0; nevertheless, if an error occurs, it is
+ * desirable to pass it to the caller
+ */
+ if ((fw_version == NULL) || (fw_size == 0))
+ return -EINVAL;
+
+ rc = efx_nic_get_fw_version(sa->nic, &enfi);
+ if (rc != 0)
+ return -rc;
+
+ ret = snprintf(fw_version, fw_size,
+ "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
+ enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
+ enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
+ if (ret < 0)
+ return ret;
+
+ if (enfi.enfi_dpcpu_fw_ids_valid) {
+ size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
+ int ret_extra;
+
+ ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
+ fw_size - dpcpu_fw_ids_offset,
+ " rx%" PRIx16 " tx%" PRIx16,
+ enfi.enfi_rx_dpcpu_fw_id,
+ enfi.enfi_tx_dpcpu_fw_id);
+ if (ret_extra < 0)
+ return ret_extra;
+
+ ret += ret_extra;
+ }
+
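+ /*
+ * As implemented here, return 0 on success or, if the supplied buffer
+ * is too small, the number of bytes (including the terminating NUL)
+ * needed to store the full version string.
+ */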
+ if (fw_size < (size_t)(++ret))
+ return ret;
+ else
+ return 0;
+}
+
+static void
+sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+ sfc_log_init(sa, "entry");
+
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+
+ dev_info->max_rx_queues = sa->rxq_max;
+ dev_info->max_tx_queues = sa->txq_max;
+
+ /* By default packets are dropped if no descriptors are available */
+ dev_info->default_rxconf.rx_drop_en = 1;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
+ if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
+ !encp->enc_hw_tx_insert_vlan_enabled)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
+ else
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ dev_info->reta_size = EFX_RSS_TBL_SIZE;
+ dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
+ dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
+ }
+#endif
+
+ if (sa->tso)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
+ dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+ /*
+ * The RXQ hardware requires that the descriptor count is a power
+ * of 2, but rx_desc_lim cannot properly describe that constraint.
+ */
+ dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+
+ dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
+ dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
+ /*
+ * The TXQ hardware requires that the descriptor count is a power
+ * of 2, but tx_desc_lim cannot properly describe that constraint.
+ */
+ dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+}
+
+static const uint32_t *
+sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ return sa->dp_rx->supported_ptypes_get();
+}
+
+static int
+sfc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct sfc_adapter *sa = dev_data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
+ dev_data->nb_rx_queues, dev_data->nb_tx_queues);
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ rc = sfc_configure(sa);
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u to configure",
+ sa->state);
+ rc = EINVAL;
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_start(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct rte_eth_link old_link;
+ struct rte_eth_link current_link;
+
+ sfc_log_init(sa, "entry");
+
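+ /*
+ * dev_link is read and updated as a single 64-bit word using
+ * compare-and-set so that concurrent updates are neither torn nor
+ * lost; retry if another context has changed it in the meantime.
+ */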
+retry:
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+ *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
+ if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ *(uint64_t *)&old_link,
+ *(uint64_t *)&current_link))
+ goto retry;
+ } else if (wait_to_complete) {
+ efx_link_mode_t link_mode;
+
+ if (efx_port_poll(sa->nic, &link_mode) != 0)
+ link_mode = EFX_LINK_UNKNOWN;
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
+ if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ *(uint64_t *)&old_link,
+ *(uint64_t *)&current_link))
+ goto retry;
+ } else {
+ sfc_ev_mgmt_qpoll(sa);
+ *(int64_t *)&current_link =
+ rte_atomic64_read((rte_atomic64_t *)dev_link);
+ }
+
+ if (old_link.link_status != current_link.link_status)
+ sfc_info(sa, "Link status is %s",
+ current_link.link_status ? "UP" : "DOWN");
+
+ return old_link.link_status == current_link.link_status ? 0 : -1;
+}
+
+static void
+sfc_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static void
+sfc_dev_close(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ sfc_stop(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_close(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u on close", sa->state);
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static void
+sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
+ boolean_t enabled)
+{
+ struct sfc_port *port;
+ boolean_t *toggle;
+ struct sfc_adapter *sa = dev->data->dev_private;
+ boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
+ const char *desc = (allmulti) ? "all-multi" : "promiscuous";
+
+ sfc_adapter_lock(sa);
+
+ port = &sa->port;
+ toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
+
+ if (*toggle != enabled) {
+ *toggle = enabled;
+
+ if ((sa->state == SFC_ADAPTER_STARTED) &&
+ (sfc_set_rx_mode(sa) != 0)) {
+ *toggle = !(enabled);
+ sfc_warn(sa, "Failed to %s %s mode",
+ ((enabled) ? "enable" : "disable"), desc);
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+}
+
+static void
+sfc_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+}
+
+static void
+sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+}
+
+static void
+sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+}
+
+static int
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
+ rx_queue_id, nb_rx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if (rc != 0)
+ goto fail_rx_qinit;
+
+ dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_rx_queue_release(void *queue)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq;
+ struct sfc_adapter *sa;
+ unsigned int sw_index;
+
+ if (dp_rxq == NULL)
+ return;
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ sa = rxq->evq->sa;
+ sfc_adapter_lock(sa);
+
+ sw_index = sfc_rxq_sw_index(rxq);
+
+ sfc_log_init(sa, "RxQ=%u", sw_index);
+
+ sa->eth_dev->data->rx_queues[sw_index] = NULL;
+
+ sfc_rx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static int
+sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
+ tx_queue_id, nb_tx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+ if (rc != 0)
+ goto fail_tx_qinit;
+
+ dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_tx_queue_release(void *queue)
+{
+ struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_txq *txq;
+ unsigned int sw_index;
+ struct sfc_adapter *sa;
+
+ if (dp_txq == NULL)
+ return;
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ sw_index = sfc_txq_sw_index(txq);
+
+ SFC_ASSERT(txq->evq != NULL);
+ sa = txq->evq->sa;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
+ sfc_tx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ if (sfc_port_update_mac_stats(sa) != 0)
+ goto unlock;
+
+ mac_stats = port->mac_stats_buf;
+
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
+ stats->ipackets =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
+ stats->opackets =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
+ stats->ibytes =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
+ stats->obytes =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+ stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
+ stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
+ stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+ } else {
+ stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
+ stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
+ stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
+ stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+ /*
+ * Take into account stats which may be supported on EF10
+ * depending on the configuration. If some stat is not supported
+ * by the current firmware variant or HW revision, it is
+ * guaranteed to be zero in mac_stats.
+ */
+ stats->imissed =
+ mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
+ mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_TRUNC_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
+ mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
+ mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
+ stats->ierrors =
+ mac_stats[EFX_MAC_RX_FCS_ERRORS] +
+ mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
+ mac_stats[EFX_MAC_RX_JABBER_PKTS];
+ /* no oerrors counters supported on EF10 */
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+}
+
+static void
+sfc_stats_reset(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ /*
+ * The operation cannot be done if the port is not started; it
+ * will be scheduled to be done during the next port start.
+ */
+ port->mac_stats_reset_pending = B_TRUE;
+ return;
+ }
+
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+}
+
+static int
+sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ int rc;
+ unsigned int i;
+ int nstats = 0;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ rc = sfc_port_update_mac_stats(sa);
+ if (rc != 0) {
+ SFC_ASSERT(rc > 0);
+ nstats = -rc;
+ goto unlock;
+ }
+
+ mac_stats = port->mac_stats_buf;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats != NULL && nstats < (int)xstats_count) {
+ xstats[nstats].id = nstats;
+ xstats[nstats].value = mac_stats[i];
+ }
+ nstats++;
+ }
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return nstats;
+}
+
+static int
+sfc_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int i;
+ unsigned int nstats = 0;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats_names != NULL && nstats < xstats_count)
+ strncpy(xstats_names[nstats].name,
+ efx_mac_stat_name(sa->nic, i),
+ sizeof(xstats_names[0].name));
+ nstats++;
+ }
+ }
+
+ return nstats;
+}
+
+static int
+sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int wanted_fc, link_fc;
+
+ memset(fc_conf, 0, sizeof(*fc_conf));
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED)
+ efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
+ else
+ link_fc = sa->port.flow_ctrl;
+
+ switch (link_fc) {
+ case 0:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ case EFX_FCNTL_RESPOND:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case EFX_FCNTL_GENERATE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ default:
+ sfc_err(sa, "%s: unexpected flow control value %#x",
+ __func__, link_fc);
+ }
+
+ fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int fcntl;
+ int rc;
+
+ if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
+ fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
+ fc_conf->mac_ctrl_frame_fwd != 0) {
+ sfc_err(sa, "unsupported flow control settings specified");
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ fcntl = 0;
+ break;
+ case RTE_FC_RX_PAUSE:
+ fcntl = EFX_FCNTL_RESPOND;
+ break;
+ case RTE_FC_TX_PAUSE:
+ fcntl = EFX_FCNTL_GENERATE;
+ break;
+ case RTE_FC_FULL:
+ fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+ }
+
+ port->flow_ctrl = fcntl;
+ port->flow_ctrl_autoneg = fc_conf->autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_mac_fcntl_set:
+ sfc_adapter_unlock(sa);
+fail_inval:
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ size_t pdu = EFX_MAC_PDU(mtu);
+ size_t old_pdu;
+ int rc;
+
+ sfc_log_init(sa, "mtu=%u", mtu);
+
+ rc = EINVAL;
+ if (pdu < EFX_MAC_PDU_MIN) {
+ sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MIN);
+ goto fail_inval;
+ }
+ if (pdu > EFX_MAC_PDU_MAX) {
+ sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MAX);
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
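+ /*
+ * If the PDU size changes while the port is started, restart the
+ * port to apply the new value; otherwise the new value simply
+ * takes effect on the next start.
+ */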
+ if (pdu != sa->port.pdu) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ sfc_stop(sa);
+
+ old_pdu = sa->port.pdu;
+ sa->port.pdu = pdu;
+ rc = sfc_start(sa);
+ if (rc != 0)
+ goto fail_start;
+ } else {
+ sa->port.pdu = pdu;
+ }
+ }
+
+ /*
+ * The driver does not use it, but other PMDs update jumbo_frame
+ * flag and max_rx_pkt_len when MTU is set.
+ */
+ dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_start:
+ sa->port.pdu = old_pdu;
+ if (sfc_start(sa) != 0)
+ sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
+ "PDU max size - port is stopped",
+ (unsigned int)pdu, (unsigned int)old_pdu);
+ sfc_adapter_unlock(sa);
+
+fail_inval:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc;
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_info(sa, "the port is not started");
+ sfc_info(sa, "the new MAC address will be set on port start");
+
+ goto unlock;
+ }
+
+ if (encp->enc_allow_set_mac_with_installed_filters) {
+ rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
+ if (rc != 0) {
+ sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
+ goto unlock;
+ }
+
+ /*
+ * Changing the MAC address by means of MCDI request
+ * has no effect on received traffic, therefore
+ * we also need to update unicast filters
+ */
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ sfc_err(sa, "cannot set filter (rc = %u)", rc);
+ } else {
+ sfc_warn(sa, "cannot set MAC address with filters installed");
+ sfc_warn(sa, "adapter will be restarted to pick the new MAC");
+ sfc_warn(sa, "(some traffic may be dropped)");
+
+ /*
+ * Since setting the MAC address with filters installed is not
+ * allowed on the adapter, simply restart the adapter so that
+ * the new MAC address is picked up from the Ethernet device
+ * data and applied by the sfc_start() call.
+ */
+ sfc_stop(sa);
+ rc = sfc_start(sa);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
+ }
+
+unlock:
+ sfc_adapter_unlock(sa);
+}
+
+
+static int
+sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint8_t *mc_addrs = port->mcast_addrs;
+ int rc;
+ unsigned int i;
+
+ if (mc_addrs == NULL)
+ return -ENOBUFS;
+
+ if (nb_mc_addr > port->max_mcast_addrs) {
+ sfc_err(sa, "too many multicast addresses: %u > %u",
+ nb_mc_addr, port->max_mcast_addrs);
+ return -EINVAL;
+ }
+
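+ /* Pack the addresses into the port's flat multicast address array */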
+ for (i = 0; i < nb_mc_addr; ++i) {
+ (void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ mc_addrs += EFX_MAC_ADDR_LEN;
+ }
+
+ port->nb_mcast_addrs = nb_mc_addr;
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return 0;
+
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static void
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(rx_queue_id < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[rx_queue_id];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq != NULL);
+
+ qinfo->mp = rxq->refill_mb_pool;
+ qinfo->conf.rx_free_thresh = rxq->refill_threshold;
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
+ qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
+ qinfo->nb_desc = rxq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_txq_info *txq_info;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(tx_queue_id < sa->txq_count);
+
+ txq_info = &sa->txq_info[tx_queue_id];
+ SFC_ASSERT(txq_info->txq != NULL);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+
+ qinfo->conf.txq_flags = txq_info->txq->flags;
+ qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
+ qinfo->conf.tx_deferred_start = txq_info->deferred_start;
+ qinfo->nb_desc = txq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+static uint32_t
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ return sfc_rx_qdesc_npending(sa, rx_queue_id);
+}
+
+static int
+sfc_rx_descriptor_done(void *queue, uint16_t offset)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+
+ return sfc_rx_qdesc_done(dp_rxq, offset);
+}
+
+static int
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_rx_qstart(sa, rx_queue_id);
+ if (rc != 0)
+ goto fail_rx_qstart;
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qstart:
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+ sfc_rx_qstop(sa, rx_queue_id);
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_tx_qstart(sa, tx_queue_id);
+ if (rc != 0)
+ goto fail_tx_qstart;
+
+ sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qstart:
+
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ sfc_tx_qstop(sa, tx_queue_id);
+
+ sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+}
+
+#if EFSYS_OPT_RX_SCALE
+static int
+sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ return -ENOTSUP;
+
+ if (sa->rss_channels == 0)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ /*
+ * Mapping of hash configuration between RTE and EFX is not one-to-one;
+ * hence, conversion is done here to derive a correct set of ETH_RSS
+ * flags which corresponds to the active EFX configuration stored
+ * locally in 'sfc_adapter' and kept up-to-date.
+ */
+ rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
+ rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
+ if (rss_conf->rss_key != NULL)
+ rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int efx_hash_types;
+ int rc = 0;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (sa->rss_channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if ((rss_conf->rss_key != NULL) &&
+ (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
+ sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sizeof(sa->rss_key));
+ return -EINVAL;
+ }
+
+ if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
+ sfc_err(sa, "unsupported hash functions requested");
+ return -EINVAL;
+ }
+
+ sfc_adapter_lock(sa);
+
+ efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
+
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ efx_hash_types, B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+
+ if (rss_conf->rss_key != NULL) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+ }
+
+ rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
+ }
+
+ sa->rss_hash_types = efx_hash_types;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_scale_key_set:
+ if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ sa->rss_hash_types, B_TRUE) != 0)
+ sfc_err(sa, "failed to restore RSS mode");
+
+fail_scale_mode_set:
+ sfc_adapter_unlock(sa);
+ return -rc;
+}
+
+static int
+sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int entry;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ return -ENOTSUP;
+
+ if (sa->rss_channels == 0)
+ return -EINVAL;
+
+ if (reta_size != EFX_RSS_TBL_SIZE)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp = entry / RTE_RETA_GROUP_SIZE;
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+
+ if ((reta_conf[grp].mask >> grp_idx) & 1)
+ reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int *rss_tbl_new;
+ uint16_t entry;
+ int rc;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (sa->rss_channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if (reta_size != EFX_RSS_TBL_SIZE) {
+ sfc_err(sa, "RETA size is wrong (should be %u)",
+ EFX_RSS_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
+ if (rss_tbl_new == NULL)
+ return -ENOMEM;
+
+ sfc_adapter_lock(sa);
+
+ rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
+
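+ /*
+ * Merge the RETA entries selected by the mask into the working copy;
+ * reject the whole update if any entry refers to a non-existent
+ * RSS channel.
+ */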
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+ struct rte_eth_rss_reta_entry64 *grp;
+
+ grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+
+ if (grp->mask & (1ull << grp_idx)) {
+ if (grp->reta[grp_idx] >= sa->rss_channels) {
+ rc = EINVAL;
+ goto bad_reta_entry;
+ }
+ rss_tbl_new[entry] = grp->reta[grp_idx];
+ }
+ }
+
+ rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
+ if (rc == 0)
+ rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+
+bad_reta_entry:
+ sfc_adapter_unlock(sa);
+
+ rte_free(rss_tbl_new);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+#endif
+
+static int
+sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc = ENOTSUP;
+
+ sfc_log_init(sa, "entry");
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ sfc_err(sa, "Global filters configuration not supported");
+ break;
+ case RTE_ETH_FILTER_MACVLAN:
+ sfc_err(sa, "MACVLAN filters not supported");
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ sfc_err(sa, "EtherType filters not supported");
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ sfc_err(sa, "Flexible filters not supported");
+ break;
+ case RTE_ETH_FILTER_SYN:
+ sfc_err(sa, "SYN filters not supported");
+ break;
+ case RTE_ETH_FILTER_NTUPLE:
+ sfc_err(sa, "NTUPLE filters not supported");
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ sfc_err(sa, "Tunnel filters not supported");
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ sfc_err(sa, "Flow Director filters not supported");
+ break;
+ case RTE_ETH_FILTER_HASH:
+ sfc_err(sa, "Hash filters not supported");
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rc = EINVAL;
+ } else {
+ *(const void **)arg = &sfc_flow_ops;
+ rc = 0;
+ }
+ break;
+ default:
+ sfc_err(sa, "Unknown filter type %u", filter_type);
+ break;
+ }
+
+ sfc_log_init(sa, "exit: %d", -rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static const struct eth_dev_ops sfc_eth_dev_ops = {
+ .dev_configure = sfc_dev_configure,
+ .dev_start = sfc_dev_start,
+ .dev_stop = sfc_dev_stop,
+ .dev_set_link_up = sfc_dev_set_link_up,
+ .dev_set_link_down = sfc_dev_set_link_down,
+ .dev_close = sfc_dev_close,
+ .promiscuous_enable = sfc_dev_promisc_enable,
+ .promiscuous_disable = sfc_dev_promisc_disable,
+ .allmulticast_enable = sfc_dev_allmulti_enable,
+ .allmulticast_disable = sfc_dev_allmulti_disable,
+ .link_update = sfc_dev_link_update,
+ .stats_get = sfc_stats_get,
+ .stats_reset = sfc_stats_reset,
+ .xstats_get = sfc_xstats_get,
+ .xstats_reset = sfc_stats_reset,
+ .xstats_get_names = sfc_xstats_get_names,
+ .dev_infos_get = sfc_dev_infos_get,
+ .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
+ .mtu_set = sfc_dev_set_mtu,
+ .rx_queue_start = sfc_rx_queue_start,
+ .rx_queue_stop = sfc_rx_queue_stop,
+ .tx_queue_start = sfc_tx_queue_start,
+ .tx_queue_stop = sfc_tx_queue_stop,
+ .rx_queue_setup = sfc_rx_queue_setup,
+ .rx_queue_release = sfc_rx_queue_release,
+ .rx_queue_count = sfc_rx_queue_count,
+ .rx_descriptor_done = sfc_rx_descriptor_done,
+ .tx_queue_setup = sfc_tx_queue_setup,
+ .tx_queue_release = sfc_tx_queue_release,
+ .flow_ctrl_get = sfc_flow_ctrl_get,
+ .flow_ctrl_set = sfc_flow_ctrl_set,
+ .mac_addr_set = sfc_mac_addr_set,
+#if EFSYS_OPT_RX_SCALE
+ .reta_update = sfc_dev_rss_reta_update,
+ .reta_query = sfc_dev_rss_reta_query,
+ .rss_hash_update = sfc_dev_rss_hash_update,
+ .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
+#endif
+ .filter_ctrl = sfc_dev_filter_ctrl,
+ .set_mc_addr_list = sfc_set_mc_addr_list,
+ .rxq_info_get = sfc_rx_queue_info_get,
+ .txq_info_get = sfc_tx_queue_info_get,
+ .fw_version_get = sfc_fw_version_get,
+};
+
+static int
+sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int avail_caps = 0;
+ const char *rx_name = NULL;
+ const char *tx_name = NULL;
+ int rc;
+
+ switch (sa->family) {
+ case EFX_FAMILY_HUNTINGTON:
+ case EFX_FAMILY_MEDFORD:
+ avail_caps |= SFC_DP_HW_FW_CAP_EF10;
+ break;
+ default:
+ break;
+ }
+
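+ /*
+ * Use the Rx datapath requested via device argument if any;
+ * otherwise pick the best one matching HW/FW capabilities.
+ */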
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
+ sfc_kvarg_string_handler, &rx_name);
+ if (rc != 0)
+ goto fail_kvarg_rx_datapath;
+
+ if (rx_name != NULL) {
+ sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath %s not found", rx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Rx datapath %s",
+ rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx;
+ }
+ } else {
+ sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ }
+
+ sfc_info(sa, "use %s Rx datapath", sa->dp_rx->dp.name);
+
+ dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
+ sfc_kvarg_string_handler, &tx_name);
+ if (rc != 0)
+ goto fail_kvarg_tx_datapath;
+
+ if (tx_name != NULL) {
+ sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath %s not found", tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Tx datapath %s",
+ tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx;
+ }
+ } else {
+ sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ }
+
+ sfc_info(sa, "use %s Tx datapath", sa->dp_tx->dp.name);
+
+ dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
+
+ dev->dev_ops = &sfc_eth_dev_ops;
+
+ return 0;
+
+fail_dp_tx:
+fail_kvarg_tx_datapath:
+fail_dp_rx:
+fail_kvarg_rx_datapath:
+ return rc;
+}
+
+static void
+sfc_register_dp(void)
+{
+ /* Register once */
+ if (TAILQ_EMPTY(&sfc_dp_head)) {
+ /* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
+ }
+}
+
+static int
+sfc_eth_dev_init(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
+ int rc;
+ const efx_nic_cfg_t *encp;
+ const struct ether_addr *from;
+
+ sfc_register_dp();
+
+ /* Required for logging */
+ sa->eth_dev = dev;
+
+ /* Copy PCI device info to the dev->data */
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ rc = sfc_kvargs_parse(sa);
+ if (rc != 0)
+ goto fail_kvargs_parse;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
+ sfc_kvarg_bool_handler, &sa->debug_init);
+ if (rc != 0)
+ goto fail_kvarg_debug_init;
+
+ sfc_log_init(sa, "entry");
+
+ dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
+ if (dev->data->mac_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mac_addrs;
+ }
+
+ sfc_adapter_lock_init(sa);
+ sfc_adapter_lock(sa);
+
+ sfc_log_init(sa, "probing");
+ rc = sfc_probe(sa);
+ if (rc != 0)
+ goto fail_probe;
+
+ sfc_log_init(sa, "set device ops");
+ rc = sfc_eth_dev_set_ops(dev);
+ if (rc != 0)
+ goto fail_set_ops;
+
+ sfc_log_init(sa, "attaching");
+ rc = sfc_attach(sa);
+ if (rc != 0)
+ goto fail_attach;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ /*
+ * The ether_addr_copy() argument order is the reverse of the
+ * Linux kernel one. Copy from NIC config to Ethernet device data.
+ */
+ from = (const struct ether_addr *)(encp->enc_mac_addr);
+ ether_addr_copy(from, &dev->data->mac_addrs[0]);
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_attach:
+fail_set_ops:
+ sfc_unprobe(sa);
+
+fail_probe:
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+fail_mac_addrs:
+fail_kvarg_debug_init:
+ sfc_kvargs_cleanup(sa);
+
+fail_kvargs_parse:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+
+ sfc_detach(sa);
+ sfc_unprobe(sa);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ sfc_kvargs_cleanup(sa);
+
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+
+ sfc_log_init(sa, "done");
+
+ /* Required for logging, so cleanup last */
+ sa->eth_dev = NULL;
+ return 0;
+}
+
+static const struct rte_pci_id pci_id_sfc_efx_map[] = {
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
+ { .vendor_id = 0 /* sentinel */ }
+};
+
+static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct sfc_adapter), sfc_eth_dev_init);
+}
+
+static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
+}
+
+static struct rte_pci_driver sfc_efx_pmd = {
+ .id_table = pci_id_sfc_efx_map,
+ .drv_flags =
+ RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_NEED_MAPPING,
+ .probe = sfc_eth_dev_pci_probe,
+ .remove = sfc_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
+ SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
+ SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
+ SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
+ SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
+ SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_ev.c b/src/seastar/dpdk/drivers/net/sfc/sfc_ev.c
new file mode 100644
index 00000000..160d39f9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_ev.c
@@ -0,0 +1,921 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_kvargs.h"
+
+
+/* Initial delay when waiting for event queue init complete event */
+#define SFC_EVQ_INIT_BACKOFF_START_US (1)
+/* Maximum delay between event queue polling attempts */
+#define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
+/* Event queue init approx timeout */
+#define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
+
+/* Management event queue polling period in microseconds */
+#define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
+
+static const char *
+sfc_evq_type2str(enum sfc_evq_type type)
+{
+ switch (type) {
+ case SFC_EVQ_TYPE_MGMT:
+ return "mgmt-evq";
+ case SFC_EVQ_TYPE_RX:
+ return "rx-evq";
+ case SFC_EVQ_TYPE_TX:
+ return "tx-evq";
+ default:
+ SFC_ASSERT(B_FALSE);
+ return NULL;
+ }
+}
+
+static boolean_t
+sfc_ev_initialized(void *arg)
+{
+ struct sfc_evq *evq = arg;
+
+ /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
+ evq->init_state == SFC_EVQ_STARTED);
+
+ evq->init_state = SFC_EVQ_STARTED;
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa,
+ "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
+ evq->evq_index, label, id, size, flags);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_efx_rxq *rxq;
+ unsigned int stop;
+ unsigned int pending_id;
+ unsigned int delta;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ if (unlikely(evq->exception))
+ goto done;
+
+ rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);
+
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->evq == evq);
+ SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);
+
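+ /*
+ * Compute the number of descriptors completed by this event,
+ * taking ring wrap-around of the completion index into account.
+ */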
+ stop = (id + 1) & rxq->ptr_mask;
+ pending_id = rxq->pending & rxq->ptr_mask;
+ delta = (stop >= pending_id) ? (stop - pending_id) :
+ (rxq->ptr_mask + 1 - pending_id + stop);
+
+ if (delta == 0) {
+ /*
+ * Rx event with no new descriptors done and zero length
+ * is used to abort scattered packet when there is no room
+ * for the tail.
+ */
+ if (unlikely(size != 0)) {
+ evq->exception = B_TRUE;
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u invalid RX abort "
+ "(id=%#x size=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, size, flags);
+ goto done;
+ }
+
+ /* Add discard flag to the first fragment */
+ rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
+ /* Remove continue flag from the last fragment */
+ rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
+ } else if (unlikely(delta > rxq->batch_max)) {
+ evq->exception = B_TRUE;
+
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u completion out of order "
+ "(id=%#x delta=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, delta, flags);
+
+ goto done;
+ }
+
+ for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
+ rxd = &rxq->sw_desc[i];
+
+ rxd->flags = flags;
+
+ SFC_ASSERT(size < (1 << 16));
+ rxd->size = (uint16_t)size;
+ }
+
+ rxq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ __rte_unused uint32_t size, __rte_unused uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
+ return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
+}
+
+static boolean_t
+sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
+ evq->evq_index, label, id);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_efx_txq *txq;
+ unsigned int stop;
+ unsigned int delta;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq->evq == evq);
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
+ goto done;
+
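+ /*
+ * Compute the number of newly completed Tx descriptors
+ * (modulo ring size) and advance the pending counter.
+ */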
+ stop = (id + 1) & txq->ptr_mask;
+ id = txq->pending & txq->ptr_mask;
+
+ delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
+
+ txq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
+ return evq->sa->dp_tx->qtx_ev(dp_txq, id);
+}
+
+static boolean_t
+sfc_ev_exception(void *arg, __rte_unused uint32_t code,
+ __rte_unused uint32_t data)
+{
+ struct sfc_evq *evq = arg;
+
+ if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
+ return B_FALSE;
+
+ evq->exception = B_TRUE;
+ sfc_warn(evq->sa,
+ "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
+ " needs recovery",
+ (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
+ (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
+ (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
+ (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
+ (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
+ (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
+ "UNKNOWN",
+ code, data, evq->evq_index);
+
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_done(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_failed(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
+ evq->evq_index, txq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_txq *txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->hw_index == txq_hw_index);
+ SFC_ASSERT(txq->evq == evq);
+ sfc_tx_qflush_done(txq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_software(void *arg, uint16_t magic)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
+ evq->evq_index, magic);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_sram(void *arg, uint32_t code)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
+ evq->evq_index, code);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_wake_up(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_timer(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected link change event",
+ evq->evq_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_adapter *sa = evq->sa;
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ struct rte_eth_link new_link;
+ uint64_t new_link_u64;
+ uint64_t old_link_u64;
+
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+
+ sfc_port_link_mode_to_info(link_mode, &new_link);
+
+ new_link_u64 = *(uint64_t *)&new_link;
+ do {
+ old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
+ if (old_link_u64 == new_link_u64)
+ break;
+
+ if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ old_link_u64, new_link_u64)) {
+ evq->sa->port.lsc_seq++;
+ break;
+ }
+ } while (B_TRUE);
+
+ return B_FALSE;
+}
+
+static const efx_ev_callbacks_t sfc_ev_callbacks = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_efx_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_dp_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_dp_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+
+void
+sfc_ev_qpoll(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
+ evq->init_state == SFC_EVQ_STARTING);
+
+ /* Synchronizing the DMA memory for reading is not required */
+
+ efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);
+
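+ /*
+ * On exception, try to recover by restarting the Rx/Tx queue
+ * served by this event queue; panic if the exception persists.
+ */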
+ if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
+ struct sfc_adapter *sa = evq->sa;
+ int rc;
+
+ if (evq->dp_rxq != NULL) {
+ unsigned int rxq_sw_index;
+
+ rxq_sw_index = evq->dp_rxq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart RxQ %u because of exception on its EvQ %u",
+ rxq_sw_index, evq->evq_index);
+
+ sfc_rx_qstop(sa, rxq_sw_index);
+ rc = sfc_rx_qstart(sa, rxq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart RxQ %u",
+ rxq_sw_index);
+ }
+
+ if (evq->dp_txq != NULL) {
+ unsigned int txq_sw_index;
+
+ txq_sw_index = evq->dp_txq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart TxQ %u because of exception on its EvQ %u",
+ txq_sw_index, evq->evq_index);
+
+ sfc_tx_qstop(sa, txq_sw_index);
+ rc = sfc_tx_qstart(sa, txq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart TxQ %u",
+ txq_sw_index);
+ }
+
+ if (evq->exception)
+ sfc_panic(sa, "unrecoverable exception on EvQ %u",
+ evq->evq_index);
+
+ sfc_adapter_unlock(sa);
+ }
+
+ /* Poll-mode driver does not re-prime the event queue for interrupts */
+}
+
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+ if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+ struct sfc_evq *mgmt_evq = sa->mgmt_evq;
+
+ if (mgmt_evq->init_state == SFC_EVQ_STARTED)
+ sfc_ev_qpoll(mgmt_evq);
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ }
+}
+
+int
+sfc_ev_qprime(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
+ return efx_ev_qprime(evq->common, evq->read_ptr);
+}
+
+/* Event queue HW index allocation scheme is described in sfc_ev.h. */
+int
+sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
+{
+ struct sfc_adapter *sa = evq->sa;
+ efsys_mem_t *esmp;
+ uint32_t evq_flags = sa->evq_flags;
+ unsigned int total_delay_us;
+ unsigned int delay_us;
+ int rc;
+
+ sfc_log_init(sa, "hw_index=%u", hw_index);
+
+ esmp = &evq->mem;
+
+ evq->evq_index = hw_index;
+
+ /* Clear all events */
+ (void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
+
+ if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ else
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
+
+ /* Create the common code event queue */
+ rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
+ 0 /* unused on EF10 */, 0, evq_flags,
+ &evq->common);
+ if (rc != 0)
+ goto fail_ev_qcreate;
+
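+ /*
+ * Select event callbacks according to the queue type and whether
+ * the libefx or a native datapath implementation is in use.
+ */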
+ SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
+ if (evq->dp_rxq != NULL) {
+ if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_rx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_rx;
+ } else if (evq->dp_txq != NULL) {
+ if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_tx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_tx;
+ } else {
+ evq->callbacks = &sfc_ev_callbacks;
+ }
+
+ evq->init_state = SFC_EVQ_STARTING;
+
+ /* Wait for the initialization event */
+ total_delay_us = 0;
+ delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
+ do {
+ (void)sfc_ev_qpoll(evq);
+
+ /* Check to see if the initialization complete indication
+ * has been posted by the hardware.
+ */
+ if (evq->init_state == SFC_EVQ_STARTED)
+ goto done;
+
+ /* Give event queue some time to init */
+ rte_delay_us(delay_us);
+
+ total_delay_us += delay_us;
+
+ /* Exponential backoff */
+ delay_us *= 2;
+ if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
+ delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
+
+ } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
+
+ rc = ETIMEDOUT;
+ goto fail_timedout;
+
+done:
+ return 0;
+
+fail_timedout:
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ efx_ev_qdestroy(evq->common);
+
+fail_ev_qcreate:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qstop(struct sfc_evq *evq)
+{
+ if (evq == NULL)
+ return;
+
+ sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
+
+ if (evq->init_state != SFC_EVQ_STARTED)
+ return;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ evq->callbacks = NULL;
+ evq->read_ptr = 0;
+ evq->exception = B_FALSE;
+
+ efx_ev_qdestroy(evq->common);
+
+ evq->evq_index = 0;
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll(void *arg)
+{
+ struct sfc_adapter *sa = arg;
+ int rc;
+
+ sfc_ev_mgmt_qpoll(sa);
+
+ rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
+ sfc_ev_mgmt_periodic_qpoll, sa);
+ if (rc == -ENOTSUP) {
+ sfc_warn(sa, "alarms are not supported");
+ sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
+ } else if (rc != 0) {
+ sfc_err(sa,
+ "cannot rearm management EVQ polling alarm (rc=%d)",
+ rc);
+ }
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
+{
+ sfc_ev_mgmt_periodic_qpoll(sa);
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
+{
+ rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
+}
+
+int
+sfc_ev_start(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ /* Start management EVQ used for global events */
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
+ if (rc != 0)
+ goto fail_mgmt_evq_start;
+
+ if (sa->intr.lsc_intr) {
+ rc = sfc_ev_qprime(sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_evq0_prime;
+ }
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ /*
+ * Start management EVQ polling. If interrupts are disabled
+ * (not used), polling is required to process link status change
+ * and other device level events and to avoid an unrecoverable
+ * error caused by event queue overflow.
+ */
+ sfc_ev_mgmt_periodic_qpoll_start(sa);
+
+ /*
+ * Rx/Tx event queues are started/stopped when corresponding
+ * Rx/Tx queue is started/stopped.
+ */
+
+ return 0;
+
+fail_evq0_prime:
+ sfc_ev_qstop(sa->mgmt_evq);
+
+fail_mgmt_evq_start:
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_mgmt_periodic_qpoll_stop(sa);
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sfc_ev_qstop(sa->mgmt_evq);
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ efx_ev_fini(sa->nic);
+}
+
+int
+sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp)
+{
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "type=%s type_index=%u",
+ sfc_evq_type2str(type), type_index);
+
+ SFC_ASSERT(rte_is_power_of_2(entries));
+
+ rc = ENOMEM;
+ evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (evq == NULL)
+ goto fail_evq_alloc;
+
+ evq->sa = sa;
+ evq->type = type;
+ evq->entries = entries;
+
+ /* Allocate DMA space */
+ rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
+ EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+
+ sa->evq_count++;
+
+ *evqp = evq;
+
+ return 0;
+
+fail_dma_alloc:
+ rte_free(evq);
+
+fail_evq_alloc:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qfini(struct sfc_evq *evq)
+{
+ struct sfc_adapter *sa = evq->sa;
+
+ SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
+
+ sfc_dma_free(sa, &evq->mem);
+
+ rte_free(evq);
+
+ SFC_ASSERT(sa->evq_count > 0);
+ sa->evq_count--;
+}
+
+static int
+sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ uint64_t *value = opaque;
+
+ if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_AUTO;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+sfc_ev_attach(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
+ sfc_kvarg_perf_profile_handler,
+ &sa->evq_flags);
+ if (rc != 0) {
+ sfc_err(sa, "invalid %s parameter value",
+ SFC_KVARG_PERF_PROFILE);
+ goto fail_kvarg_perf_profile;
+ }
+
+ sa->mgmt_evq_index = 0;
+ rte_spinlock_init(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+ sa->socket_id, &sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_mgmt_evq_init;
+
+ /*
+ * Rx/Tx event queues are created/destroyed when corresponding
+ * Rx/Tx queue is created/destroyed.
+ */
+
+ return 0;
+
+fail_mgmt_evq_init:
+
+fail_kvarg_perf_profile:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_qfini(sa->mgmt_evq);
+
+ if (sa->evq_count != 0)
+ sfc_err(sa, "%u EvQs are not destroyed before detach",
+ sa->evq_count);
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_ev.h b/src/seastar/dpdk/drivers/net/sfc/sfc_ev.h
new file mode 100644
index 00000000..065defe0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_ev.h
@@ -0,0 +1,129 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_EV_H_
+#define _SFC_EV_H_
+
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of entries in the management event queue */
+#define SFC_MGMT_EVQ_ENTRIES (EFX_EVQ_MINNEVS)
+
+struct sfc_adapter;
+struct sfc_dp_rxq;
+struct sfc_dp_txq;
+
+enum sfc_evq_state {
+ SFC_EVQ_UNINITIALIZED = 0,
+ SFC_EVQ_INITIALIZED,
+ SFC_EVQ_STARTING,
+ SFC_EVQ_STARTED,
+
+ SFC_EVQ_NSTATES
+};
+
+enum sfc_evq_type {
+ SFC_EVQ_TYPE_MGMT = 0,
+ SFC_EVQ_TYPE_RX,
+ SFC_EVQ_TYPE_TX,
+
+ SFC_EVQ_NTYPES
+};
+
+struct sfc_evq {
+ /* Used on datapath */
+ efx_evq_t *common;
+ const efx_ev_callbacks_t *callbacks;
+ unsigned int read_ptr;
+ boolean_t exception;
+ efsys_mem_t mem;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_dp_txq *dp_txq;
+
+ /* Not used on datapath */
+ struct sfc_adapter *sa;
+ unsigned int evq_index;
+ enum sfc_evq_state init_state;
+ enum sfc_evq_type type;
+ unsigned int entries;
+};
+
+/*
+ * Functions below define the mapping between event queues and
+ * transmit/receive queues, and vice versa.
+ * A dedicated event queue is allocated for management and for each
+ * Rx and each Tx queue.
+ * Event queue 0 is used for management events.
+ * Rx event queues 1 to the number of Rx queues follow the management
+ * event queue.
+ * Tx event queues follow the Rx event queues.
+ */
+
+static inline unsigned int
+sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+ unsigned int rxq_sw_index)
+{
+ return 1 + rxq_sw_index;
+}
+
+static inline unsigned int
+sfc_evq_index_by_txq_sw_index(struct sfc_adapter *sa, unsigned int txq_sw_index)
+{
+ return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
+}
+
+int sfc_ev_attach(struct sfc_adapter *sa);
+void sfc_ev_detach(struct sfc_adapter *sa);
+int sfc_ev_start(struct sfc_adapter *sa);
+void sfc_ev_stop(struct sfc_adapter *sa);
+
+int sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp);
+void sfc_ev_qfini(struct sfc_evq *evq);
+int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
+void sfc_ev_qstop(struct sfc_evq *evq);
+
+int sfc_ev_qprime(struct sfc_evq *evq);
+void sfc_ev_qpoll(struct sfc_evq *evq);
+
+void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EV_H_ */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_filter.c b/src/seastar/dpdk/drivers/net/sfc/sfc_filter.c
new file mode 100644
index 00000000..58b74de7
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_filter.c
@@ -0,0 +1,137 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+
+boolean_t
+sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t i;
+
+ for (i = 0; i < filter->supported_match_num; ++i) {
+ if (match == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+static int
+sfc_filter_cache_match_supported(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t num = filter->supported_match_num;
+ uint32_t *buf = filter->supported_match;
+ unsigned int retry;
+ int rc;
+
+ /* Just a guess of possibly sufficient entries */
+ if (num == 0)
+ num = 16;
+
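+ /*
+ * Query the list of supported filter matches; if the buffer turns
+ * out to be too small, retry once with the size reported by the
+ * first attempt.
+ */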
+ for (retry = 0; retry < 2; ++retry) {
+ if (num != filter->supported_match_num) {
+ rc = ENOMEM;
+ buf = rte_realloc(buf, num * sizeof(*buf), 0);
+ if (buf == NULL)
+ goto fail_realloc;
+ }
+
+ rc = efx_filter_supported_filters(sa->nic, buf, num, &num);
+ if (rc == 0) {
+ filter->supported_match_num = num;
+ filter->supported_match = buf;
+
+ return 0;
+ } else if (rc != ENOSPC) {
+ goto fail_efx_filter_supported_filters;
+ }
+ }
+
+ SFC_ASSERT(rc == ENOSPC);
+
+fail_efx_filter_supported_filters:
+fail_realloc:
+ /* Original pointer is not freed by rte_realloc() on failure */
+ rte_free(buf);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+ return rc;
+}
+
+int
+sfc_filter_attach(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ rc = sfc_filter_cache_match_supported(sa);
+ if (rc != 0)
+ goto fail_cache_match_supported;
+
+ efx_filter_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+
+ return 0;
+
+fail_cache_match_supported:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_filter_detach(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+
+ sfc_log_init(sa, "entry");
+
+ rte_free(filter->supported_match);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+
+ sfc_log_init(sa, "done");
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_filter.h b/src/seastar/dpdk/drivers/net/sfc/sfc_filter.h
new file mode 100644
index 00000000..d884f37d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_filter.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_FILTER_H
+#define _SFC_FILTER_H
+
+#include "efx.h"
+
+#include "sfc_flow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_filter {
+ /** Number of elements in the supported_match array */
+ size_t supported_match_num;
+ /** Driver cache of supported filter match masks */
+ uint32_t *supported_match;
+ /** List of flow rules */
+ struct sfc_flow_list flow_list;
+};
+
+struct sfc_adapter;
+
+int sfc_filter_attach(struct sfc_adapter *sa);
+void sfc_filter_detach(struct sfc_adapter *sa);
+
+boolean_t sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FILTER_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_flow.c b/src/seastar/dpdk/drivers/net/sfc/sfc_flow.c
new file mode 100644
index 00000000..c3ea43a6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_flow.c
@@ -0,0 +1,1175 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_tailq.h>
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_rx.h"
+#include "sfc_filter.h"
+#include "sfc_flow.h"
+#include "sfc_log.h"
+
+/*
+ * At the moment the flow API is implemented in such a manner that each
+ * flow rule is converted to a single hardware filter.
+ * All elements of the flow rule (attributes, pattern items, actions)
+ * correspond to one or more fields in the efx_filter_spec_s structure
+ * that describes the hardware filter.
+ */
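+
+/*
+ * Illustrative example (not part of the driver): a flow rule of the kind
+ * this conversion deals with, matching UDP-over-IPv4 traffic on a given
+ * destination port and steering it to an Rx queue.  The function name,
+ * port number and queue are arbitrary values chosen for the example;
+ * whether such a rule is accepted also depends on the match combinations
+ * the adapter reports as supported.
+ */
+static __rte_unused struct rte_flow *
+sfc_flow_example_udp_to_queue(uint8_t port_id, uint16_t rx_queue,
+			      struct rte_flow_error *error)
+{
+	const struct rte_flow_attr attr = { .ingress = 1 };
+	const struct rte_flow_item_udp udp_spec = {
+		.hdr = { .dst_port = rte_cpu_to_be_16(5000) },
+	};
+	const struct rte_flow_item_udp udp_mask = {
+		.hdr = { .dst_port = 0xffff },
+	};
+	const struct rte_flow_item pattern[] = {
+		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
+		  .spec = &udp_spec, .mask = &udp_mask },
+		{ .type = RTE_FLOW_ITEM_TYPE_END },
+	};
+	const struct rte_flow_action_queue queue = { .index = rx_queue };
+	const struct rte_flow_action actions[] = {
+		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+		{ .type = RTE_FLOW_ACTION_TYPE_END },
+	};
+
+	return rte_flow_create(port_id, &attr, pattern, actions, error);
+}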
+
+enum sfc_flow_item_layers {
+ SFC_FLOW_ITEM_ANY_LAYER,
+ SFC_FLOW_ITEM_START_LAYER,
+ SFC_FLOW_ITEM_L2,
+ SFC_FLOW_ITEM_L3,
+ SFC_FLOW_ITEM_L4,
+};
+
+typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
+ efx_filter_spec_t *spec,
+ struct rte_flow_error *error);
+
+struct sfc_flow_item {
+ enum rte_flow_item_type type; /* Type of item */
+ enum sfc_flow_item_layers layer; /* Layer of item */
+ enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
+ sfc_flow_item_parse *parse; /* Parsing function */
+};
+
+static sfc_flow_item_parse sfc_flow_parse_void;
+static sfc_flow_item_parse sfc_flow_parse_eth;
+static sfc_flow_item_parse sfc_flow_parse_vlan;
+static sfc_flow_item_parse sfc_flow_parse_ipv4;
+static sfc_flow_item_parse sfc_flow_parse_ipv6;
+static sfc_flow_item_parse sfc_flow_parse_tcp;
+static sfc_flow_item_parse sfc_flow_parse_udp;
+
+static boolean_t
+sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
+{
+ uint8_t sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ sum |= buf[i];
+
+ return (sum == 0) ? B_TRUE : B_FALSE;
+}
+
+/*
+ * Validate the item and prepare the "spec" and "mask" structures for parsing
+ */
+static int
+sfc_flow_parse_init(const struct rte_flow_item *item,
+ const void **spec_ptr,
+ const void **mask_ptr,
+ const void *supp_mask,
+ const void *def_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *spec;
+ const uint8_t *mask;
+ const uint8_t *last;
+ uint8_t match;
+ uint8_t supp;
+ unsigned int i;
+
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "NULL item");
+ return -rte_errno;
+ }
+
+ if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Mask or last is set without spec");
+ return -rte_errno;
+ }
+
+ /*
+	 * If "mask" is not set, the default mask is used,
+	 * but if the default mask is NULL, "mask" must be set explicitly
+ */
+ if (item->mask == NULL) {
+ if (def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Mask should be specified");
+ return -rte_errno;
+ }
+
+ mask = (const uint8_t *)def_mask;
+ } else {
+ mask = (const uint8_t *)item->mask;
+ }
+
+ spec = (const uint8_t *)item->spec;
+ last = (const uint8_t *)item->last;
+
+ if (spec == NULL)
+ goto exit;
+
+ /*
+	 * The "last" structure is ignored if it is either all-zero or
+	 * identical to "spec"; otherwise ranging is requested, which is
+	 * not supported
+ */
+ if (last != NULL &&
+ !sfc_flow_is_zero(last, size) &&
+ memcmp(last, spec, size) != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ranging is not supported");
+ return -rte_errno;
+ }
+
+ if (supp_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Supported mask for item should be specified");
+ return -rte_errno;
+ }
+
+	/* Check that mask and spec do not ask for a wider match than supp_mask allows */
+ for (i = 0; i < size; i++) {
+ match = spec[i] | mask[i];
+ supp = ((const uint8_t *)supp_mask)[i];
+
+ if ((match | supp) != supp) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Item's field is not supported");
+ return -rte_errno;
+ }
+ }
+
+exit:
+ *spec_ptr = spec;
+ *mask_ptr = mask;
+ return 0;
+}
+
+/*
+ * Protocol parsers.
+ * Partial masking is not supported, so masks in items must be either
+ * full or empty (all-zero), and may be set only for the supported fields
+ * listed in the corresponding supp_mask.
+ */
+
+static int
+sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
+ __rte_unused efx_filter_spec_t *efx_spec,
+ __rte_unused struct rte_flow_error *error)
+{
+ return 0;
+}
+
+/**
+ * Convert Ethernet item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * Ethernet type fields are supported. In addition to full and
+ * empty masks of destination address, individual/group mask is
+ * also supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_eth(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_eth *spec = NULL;
+ const struct rte_flow_item_eth *mask = NULL;
+ const struct rte_flow_item_eth supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .type = 0xffff,
+ };
+ const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_eth_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /* If "spec" is not set, could be any Ethernet */
+ if (spec == NULL)
+ return 0;
+
+ if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (memcmp(mask->dst.addr_bytes, ig_mask,
+ EFX_MAC_ADDR_LEN) == 0) {
+ if (is_unicast_ether_addr(&spec->dst))
+ efx_spec->efs_match_flags |=
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ else
+ efx_spec->efs_match_flags |=
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ } else if (!is_zero_ether_addr(&mask->dst)) {
+ goto fail_bad_mask;
+ }
+
+ if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (!is_zero_ether_addr(&mask->src)) {
+ goto fail_bad_mask;
+ }
+
+ /*
+ * Ether type is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->type == supp_mask.type) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = rte_bswap16(spec->type);
+ } else if (mask->type != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the ETH pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert VLAN item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only VID field is supported.
+ * The mask can not be NULL. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ uint16_t vid;
+ const struct rte_flow_item_vlan *spec = NULL;
+ const struct rte_flow_item_vlan *mask = NULL;
+ const struct rte_flow_item_vlan supp_mask = {
+ .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ NULL,
+ sizeof(struct rte_flow_item_vlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * VID is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used.
+ * If two VLAN items are included, the first matches
+ * the outer tag and the next matches the inner tag.
+ */
+ if (mask->tci == supp_mask.tci) {
+ vid = rte_bswap16(spec->tci);
+
+ if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_OUTER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ efx_spec->efs_outer_vid = vid;
+ } else if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_INNER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
+ efx_spec->efs_inner_vid = vid;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "More than two VLAN items");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN ID in TCI match is required");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert IPv4 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * protocol fields are supported. If the mask is NULL, default
+ * mask will be used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv4(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv4 *spec = NULL;
+ const struct rte_flow_item_ipv4 *mask = NULL;
+ const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
+ const struct rte_flow_item_ipv4 supp_mask = {
+ .hdr = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ .next_proto_id = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv4 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv4;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV4 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv4 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
+ } else if (mask->hdr.src_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
+ } else if (mask->hdr.dst_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
+ } else if (mask->hdr.next_proto_id != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV4 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert IPv6 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * next header fields are supported. If the mask is NULL, default
+ * mask will be used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv6(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv6 *spec = NULL;
+ const struct rte_flow_item_ipv6 *mask = NULL;
+ const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
+ const struct rte_flow_item_ipv6 supp_mask = {
+ .hdr = {
+ .src_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .dst_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .proto = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv6 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv6;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV6 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv6 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
+ sizeof(mask->hdr.src_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
+ sizeof(spec->hdr.src_addr));
+ rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
+ sizeof(efx_spec->efs_rem_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
+ sizeof(mask->hdr.src_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
+ sizeof(spec->hdr.dst_addr));
+ rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
+ sizeof(efx_spec->efs_loc_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.proto == supp_mask.hdr.proto) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.proto;
+ } else if (mask->hdr.proto != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV6 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert TCP item to EFX filter specification.
+ *
+ * @param item[in]
+ *   Item specification. Only the source and destination port fields
+ * are supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_tcp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_tcp *spec = NULL;
+ const struct rte_flow_item_tcp *mask = NULL;
+ const struct rte_flow_item_tcp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by TCP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with TCP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the TCP pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert UDP item to EFX filter specification.
+ *
+ * @param item[in]
+ *   Item specification. Only the source and destination port fields
+ * are supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_udp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_udp *spec = NULL;
+ const struct rte_flow_item_udp *mask = NULL;
+ const struct rte_flow_item_udp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by UDP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with UDP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the UDP pattern item");
+ return -rte_errno;
+}
+
+static const struct sfc_flow_item sfc_flow_items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .parse = sfc_flow_parse_void,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .prev_layer = SFC_FLOW_ITEM_START_LAYER,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_eth,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_vlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv4,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv6,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_tcp,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_udp,
+ },
+};
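+
+/*
+ * For example, the ordering encoded above accepts ETH / VLAN / IPV4 / TCP
+ * as a valid sequence (L2 -> L2 -> L3 -> L4), while ETH / TCP is rejected
+ * because the TCP item expects an L3 item immediately before it.
+ */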
+
+/*
+ * Protocol-independent flow API support
+ */
+static int
+sfc_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
+ flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+
+ return 0;
+}
+
+/* Get item from array sfc_flow_items */
+static const struct sfc_flow_item *
+sfc_flow_get_item(enum rte_flow_item_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
+ if (sfc_flow_items[i].type == type)
+ return &sfc_flow_items[i];
+
+ return NULL;
+}
+
+static int
+sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+ const struct sfc_flow_item *item;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ item = sfc_flow_get_item(pattern->type);
+ if (item == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unsupported pattern item");
+ return -rte_errno;
+ }
+
+ /*
+ * Omitting one or several protocol layers at the beginning
+		 * of the pattern is supported
+ */
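+		/*
+		 * E.g. a pattern that begins directly with an IPV4 or UDP item
+		 * passes this check, since prev_layer is initially "any layer"
+		 */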
+ if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ item->prev_layer != prev_layer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unexpected sequence of pattern items");
+ return -rte_errno;
+ }
+
+ rc = item->parse(pattern, &flow->spec, error);
+ if (rc != 0)
+ return rc;
+
+ if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
+ prev_layer = item->layer;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_queue(struct sfc_adapter *sa,
+ const struct rte_flow_action_queue *queue,
+ struct rte_flow *flow)
+{
+ struct sfc_rxq *rxq;
+
+ if (queue->index >= sa->rxq_count)
+ return -EINVAL;
+
+ rxq = sa->rxq_info[queue->index].rxq;
+ flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_actions(struct sfc_adapter *sa,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ boolean_t is_specified = B_FALSE;
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ rc = sfc_flow_parse_queue(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+
+ is_specified = B_TRUE;
+ break;
+
+ default:
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Action is not supported");
+ return -rte_errno;
+ }
+ }
+
+ if (!is_specified) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
+ "Action is unspecified");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ memset(&flow->spec, 0, sizeof(flow->spec));
+
+ rc = sfc_flow_parse_attr(attr, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_pattern(pattern, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_actions(sa, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Flow rule pattern is not supported");
+ return -rte_errno;
+ }
+
+fail_bad_value:
+ return rc;
+}
+
+static int
+sfc_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow flow;
+
+ return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
+}
+
+static struct rte_flow *
+sfc_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ goto fail_no_mem;
+ }
+
+ rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_filter_insert(sa->nic, &flow->spec);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to insert filter");
+ goto fail_filter_insert;
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return flow;
+
+fail_filter_insert:
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+
+fail_bad_value:
+ rte_free(flow);
+ sfc_adapter_unlock(sa);
+
+fail_no_mem:
+ return NULL;
+}
+
+static int
+sfc_flow_remove(struct sfc_adapter *sa,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_filter_remove(sa->nic, &flow->spec);
+ if (rc != 0)
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to destroy flow rule");
+ }
+
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+
+ return rc;
+}
+
+static int
+sfc_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow_ptr;
+ int rc = EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
+ if (flow_ptr == flow)
+ rc = 0;
+ }
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to find flow rule to destroy");
+ goto fail_bad_value;
+ }
+
+ rc = sfc_flow_remove(sa, flow, error);
+
+fail_bad_value:
+ sfc_adapter_unlock(sa);
+
+ return -rc;
+}
+
+static int
+sfc_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow;
+ int rc = 0;
+ int ret = 0;
+
+ sfc_adapter_lock(sa);
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ rc = sfc_flow_remove(sa, flow, error);
+ if (rc != 0)
+ ret = rc;
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return -ret;
+}
+
+const struct rte_flow_ops sfc_flow_ops = {
+ .validate = sfc_flow_validate,
+ .create = sfc_flow_create,
+ .destroy = sfc_flow_destroy,
+ .flush = sfc_flow_flush,
+ .query = NULL,
+};
+
+void
+sfc_flow_init(struct sfc_adapter *sa)
+{
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_INIT(&sa->filter.flow_list);
+}
+
+void
+sfc_flow_fini(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+ }
+}
+
+void
+sfc_flow_stop(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
+ efx_filter_remove(sa->nic, &flow->spec);
+}
+
+int
+sfc_flow_start(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+ int rc = 0;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
+ rc = efx_filter_insert(sa->nic, &flow->spec);
+ if (rc != 0)
+ goto fail_bad_flow;
+ }
+
+ sfc_log_init(sa, "done");
+
+fail_bad_flow:
+ return rc;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_flow.h b/src/seastar/dpdk/drivers/net/sfc/sfc_flow.h
new file mode 100644
index 00000000..bfc34364
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_flow.h
@@ -0,0 +1,64 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_FLOW_H
+#define _SFC_FLOW_H
+
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* PMD-specific definition of the opaque type from rte_flow.h */
+struct rte_flow {
+ efx_filter_spec_t spec; /* filter specification */
+ TAILQ_ENTRY(rte_flow) entries; /* flow list entries */
+};
+
+TAILQ_HEAD(sfc_flow_list, rte_flow);
+
+extern const struct rte_flow_ops sfc_flow_ops;
+
+struct sfc_adapter;
+
+void sfc_flow_init(struct sfc_adapter *sa);
+void sfc_flow_fini(struct sfc_adapter *sa);
+int sfc_flow_start(struct sfc_adapter *sa);
+void sfc_flow_stop(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FLOW_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_intr.c b/src/seastar/dpdk/drivers/net/sfc/sfc_intr.c
new file mode 100644
index 00000000..7eb4b86c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_intr.c
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * At the moment of writing, DPDK v16.07 has a notion of two types of
+ * interrupts: LSC (link status change) and RXQ (receive indication).
+ * It allows an interrupt callback to be registered for the entire device,
+ * which is not intended to be used for receive indication (i.e. link
+ * status change indication only). The handler has no information about
+ * which HW interrupt has triggered it, so we don't know which event queue
+ * should be polled/reprimed (except via qmask in the case of a legacy
+ * line interrupt).
+ */
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+
+static void
+sfc_intr_handle_mgmt_evq(struct sfc_adapter *sa)
+{
+ struct sfc_evq *evq;
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ evq = sa->mgmt_evq;
+
+ if (evq->init_state != SFC_EVQ_STARTED) {
+ sfc_log_init(sa, "interrupt on stopped EVQ %u", evq->evq_index);
+ } else {
+ sfc_ev_qpoll(evq);
+
+ if (sfc_ev_qprime(evq) != 0)
+ sfc_err(sa, "cannot prime EVQ %u", evq->evq_index);
+ }
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+}
+
+static void
+sfc_intr_line_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ uint32_t qmask;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa,
+ "interrupt on stopped adapter, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_line(enp, &fatal, &qmask);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ if (qmask & (1 << sa->mgmt_evq_index))
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_info(sa, "link status change event: link %s",
+ sa->eth_dev->data->dev_link.link_status ?
+ "UP" : "DOWN");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+}
+
+static void
+sfc_intr_message_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa, "adapter not-started, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_message(enp, sa->mgmt_evq_index, &fatal);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_info(sa, "link status change event");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+}
+
+int
+sfc_intr_start(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_intr_handle *intr_handle;
+ struct rte_pci_device *pci_dev;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ /*
+ * The EFX common code event queue module depends on the interrupt
+ * module. Ensure that the interrupt module is always initialized
+ * (even if interrupts are not used). Status memory is required
+ * for Siena only and may be NULL for EF10.
+ */
+ sfc_log_init(sa, "efx_intr_init");
+ rc = efx_intr_init(sa->nic, intr->type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+
+ if (intr->handler != NULL) {
+ sfc_log_init(sa, "rte_intr_callback_register");
+ rc = rte_intr_callback_register(intr_handle, intr->handler,
+ (void *)sa);
+ if (rc != 0) {
+ sfc_err(sa,
+ "cannot register interrupt handler (rc=%d)",
+ rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_cb_reg;
+ }
+
+ sfc_log_init(sa, "rte_intr_enable");
+ rc = rte_intr_enable(intr_handle);
+ if (rc != 0) {
+ sfc_err(sa, "cannot enable interrupts (rc=%d)", rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_enable;
+ }
+
+ sfc_log_init(sa, "efx_intr_enable");
+ efx_intr_enable(sa->nic);
+ }
+
+ sfc_log_init(sa, "done type=%u max_intr=%d nb_efd=%u vec=%p",
+ intr_handle->type, intr_handle->max_intr,
+ intr_handle->nb_efd, intr_handle->intr_vec);
+ return 0;
+
+fail_rte_intr_enable:
+ rte_intr_callback_unregister(intr_handle, intr->handler, (void *)sa);
+
+fail_rte_intr_cb_reg:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_intr_stop(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (intr->handler != NULL) {
+ struct rte_intr_handle *intr_handle;
+ int rc;
+
+ efx_intr_disable(sa->nic);
+
+ intr_handle = &pci_dev->intr_handle;
+ if (rte_intr_disable(intr_handle) != 0)
+ sfc_err(sa, "cannot disable interrupts");
+
+ while ((rc = rte_intr_callback_unregister(intr_handle,
+ intr->handler, (void *)sa)) == -EAGAIN)
+ ;
+ if (rc != 1)
+ sfc_err(sa,
+ "cannot unregister interrupt handler %d",
+ rc);
+ }
+
+ efx_intr_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_configure(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+
+ sfc_log_init(sa, "entry");
+
+ intr->handler = NULL;
+ intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0);
+ if (!intr->lsc_intr) {
+ sfc_info(sa, "LSC tracking using interrupts is disabled");
+ goto done;
+ }
+
+ switch (intr->type) {
+ case EFX_INTR_MESSAGE:
+ intr->handler = sfc_intr_message_handler;
+ break;
+ case EFX_INTR_LINE:
+ intr->handler = sfc_intr_line_handler;
+ break;
+ case EFX_INTR_INVALID:
+ sfc_warn(sa, "interrupts are not supported");
+ break;
+ default:
+ sfc_panic(sa, "unexpected EFX interrupt type %u\n", intr->type);
+ break;
+ }
+
+done:
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_attach(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ switch (pci_dev->intr_handle.type) {
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ intr->type = EFX_INTR_LINE;
+ break;
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ intr->type = EFX_INTR_MESSAGE;
+ break;
+#endif
+ default:
+ intr->type = EFX_INTR_INVALID;
+ break;
+ }
+
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sa->intr.type = EFX_INTR_INVALID;
+
+ sfc_log_init(sa, "done");
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.c b/src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.c
new file mode 100644
index 00000000..7bcd5951
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.c
@@ -0,0 +1,145 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+#include <strings.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+
+#include "sfc.h"
+#include "sfc_kvargs.h"
+
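+/*
+ * Example (illustrative only): device arguments such as
+ *     -w 0000:83:00.0,mcdi_logging=1,perf_profile=low-latency
+ * on the EAL command line arrive here via devargs->args and are matched
+ * against the key list below; the PCI address is just a placeholder.
+ */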
+int
+sfc_kvargs_parse(struct sfc_adapter *sa)
+{
+ struct rte_eth_dev *eth_dev = (sa)->eth_dev;
+ struct rte_devargs *devargs = eth_dev->device->devargs;
+ const char **params = (const char *[]){
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ SFC_KVARG_DEBUG_INIT,
+ SFC_KVARG_MCDI_LOGGING,
+ SFC_KVARG_PERF_PROFILE,
+ SFC_KVARG_RX_DATAPATH,
+ SFC_KVARG_TX_DATAPATH,
+ NULL,
+ };
+
+ if (devargs == NULL)
+ return 0;
+
+ sa->kvargs = rte_kvargs_parse(devargs->args, params);
+ if (sa->kvargs == NULL)
+ return EINVAL;
+
+ return 0;
+}
+
+void
+sfc_kvargs_cleanup(struct sfc_adapter *sa)
+{
+ rte_kvargs_free(sa->kvargs);
+}
+
+static int
+sfc_kvarg_match_value(const char *value, const char * const *values,
+ unsigned int n_values)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_values; ++i)
+ if (strcasecmp(value, values[i]) == 0)
+ return 1;
+
+ return 0;
+}
+
+int
+sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg)
+{
+ if (sa->kvargs == NULL)
+ return 0;
+
+ return -rte_kvargs_process(sa->kvargs, key_match, handler, opaque_arg);
+}
+
+int
+sfc_kvarg_bool_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ const char * const true_strs[] = {
+ "1", "y", "yes", "on", "true"
+ };
+ const char * const false_strs[] = {
+ "0", "n", "no", "off", "false"
+ };
+ bool *value = opaque;
+
+ if (sfc_kvarg_match_value(value_str, true_strs,
+ RTE_DIM(true_strs)))
+ *value = true;
+ else if (sfc_kvarg_match_value(value_str, false_strs,
+ RTE_DIM(false_strs)))
+ *value = false;
+ else
+ return -EINVAL;
+
+ return 0;
+}
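+
+/*
+ * Illustrative sketch of how the boolean handler is meant to be used: the
+ * caller owns the flag and lets sfc_kvargs_process() invoke the handler
+ * for every matching key.  The function name and the local variable are
+ * made up for the example; the actual call sites live elsewhere in the
+ * driver.
+ */
+static __rte_unused int
+sfc_kvarg_bool_example(struct sfc_adapter *sa)
+{
+	bool mcdi_logging = false;
+
+	return sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
+				  sfc_kvarg_bool_handler, &mcdi_logging);
+}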
+
+int
+sfc_kvarg_long_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ long value;
+ char *endptr;
+
+ if (!value_str || !opaque)
+ return -EINVAL;
+
+ value = strtol(value_str, &endptr, 0);
+ if (endptr == value_str)
+ return -EINVAL;
+
+ *(long *)opaque = value;
+
+ return 0;
+}
+
+int
+sfc_kvarg_string_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ *(const char **)opaque = value_str;
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.h b/src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.h
new file mode 100644
index 00000000..d9c3b1da
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_kvargs.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_KVARGS_H
+#define _SFC_KVARGS_H
+
+#include <rte_kvargs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]"
+
+#define SFC_KVARG_DEBUG_INIT "debug_init"
+
+#define SFC_KVARG_MCDI_LOGGING "mcdi_logging"
+
+#define SFC_KVARG_PERF_PROFILE "perf_profile"
+
+#define SFC_KVARG_PERF_PROFILE_AUTO "auto"
+#define SFC_KVARG_PERF_PROFILE_THROUGHPUT "throughput"
+#define SFC_KVARG_PERF_PROFILE_LOW_LATENCY "low-latency"
+#define SFC_KVARG_VALUES_PERF_PROFILE \
+ "[" SFC_KVARG_PERF_PROFILE_AUTO "|" \
+ SFC_KVARG_PERF_PROFILE_THROUGHPUT "|" \
+ SFC_KVARG_PERF_PROFILE_LOW_LATENCY "]"
+
+#define SFC_KVARG_STATS_UPDATE_PERIOD_MS "stats_update_period_ms"
+
+#define SFC_KVARG_DATAPATH_EFX "efx"
+#define SFC_KVARG_DATAPATH_EF10 "ef10"
+#define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple"
+
+#define SFC_KVARG_RX_DATAPATH "rx_datapath"
+#define SFC_KVARG_VALUES_RX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "]"
+
+#define SFC_KVARG_TX_DATAPATH "tx_datapath"
+#define SFC_KVARG_VALUES_TX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "|" \
+ SFC_KVARG_DATAPATH_EF10_SIMPLE "]"
+
+struct sfc_adapter;
+
+int sfc_kvargs_parse(struct sfc_adapter *sa);
+void sfc_kvargs_cleanup(struct sfc_adapter *sa);
+
+int sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg);
+
+int sfc_kvarg_bool_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_long_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_string_handler(const char *key, const char *value_str,
+ void *opaque);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_KVARGS_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_log.h b/src/seastar/dpdk/drivers/net/sfc/sfc_log.h
new file mode 100644
index 00000000..8a5e2302
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_log.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_LOG_H_
+#define _SFC_LOG_H_
+
+/* Log PMD message, automatically add prefix and \n */
+#define SFC_LOG(sa, level, ...) \
+ do { \
+ const struct rte_eth_dev *_dev = (sa)->eth_dev; \
+ const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ _pci_dev->addr.domain, \
+ _pci_dev->addr.bus, \
+ _pci_dev->addr.devid, \
+ _pci_dev->addr.function, \
+ _dev->data->port_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+#define sfc_err(sa, ...) \
+ SFC_LOG(sa, ERR, __VA_ARGS__)
+
+#define sfc_warn(sa, ...) \
+ SFC_LOG(sa, WARNING, __VA_ARGS__)
+
+#define sfc_notice(sa, ...) \
+ SFC_LOG(sa, NOTICE, __VA_ARGS__)
+
+#define sfc_info(sa, ...) \
+ SFC_LOG(sa, INFO, __VA_ARGS__)
+
+#define sfc_log_init(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ if (_sa->debug_init) \
+ SFC_LOG(_sa, INFO, \
+ RTE_FMT("%s(): " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+#endif /* _SFC_LOG_H_ */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_mcdi.c b/src/seastar/dpdk/drivers/net/sfc/sfc_mcdi.c
new file mode 100644
index 00000000..0faad3ed
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_mcdi.c
@@ -0,0 +1,331 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cycles.h>
+
+#include "efx.h"
+#include "efx_mcdi.h"
+#include "efx_regs_mcdi.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+
+#define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */
+#define SFC_MCDI_POLL_INTERVAL_MAX_US (US_PER_S / 10) /* 100ms in 1us units */
+#define SFC_MCDI_WATCHDOG_INTERVAL_US (10 * US_PER_S) /* 10s in 1us units */
+
+static void
+sfc_mcdi_timeout(struct sfc_adapter *sa)
+{
+ sfc_warn(sa, "MC TIMEOUT");
+
+ sfc_panic(sa, "MCDI timeout handling is not implemented\n");
+}
+
+static inline boolean_t
+sfc_mcdi_proxy_event_available(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = 0;
+ mcdi->proxy_result = ETIMEDOUT;
+ sfc_ev_mgmt_qpoll(sa);
+ if (mcdi->proxy_result != ETIMEDOUT)
+ return B_TRUE;
+
+ return B_FALSE;
+}
+
+static void
+sfc_mcdi_poll(struct sfc_adapter *sa, boolean_t proxy)
+{
+ efx_nic_t *enp;
+ unsigned int delay_total;
+ unsigned int delay_us;
+ boolean_t aborted __rte_unused;
+
+ delay_total = 0;
+ delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US;
+ enp = sa->nic;
+
+ do {
+ boolean_t poll_completed;
+
+ poll_completed = (proxy) ? sfc_mcdi_proxy_event_available(sa) :
+ efx_mcdi_request_poll(enp);
+ if (poll_completed)
+ return;
+
+ if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) {
+ if (!proxy) {
+ aborted = efx_mcdi_request_abort(enp);
+ SFC_ASSERT(aborted);
+ sfc_mcdi_timeout(sa);
+ }
+
+ return;
+ }
+
+ rte_delay_us(delay_us);
+
+ delay_total += delay_us;
+
+ /* Exponentially back off the poll frequency */
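+		/* i.e. 10us, 20us, 40us, ..., capped at 100ms per iteration */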
+ RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2);
+ delay_us *= 2;
+ if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US)
+ delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US;
+
+ } while (1);
+}
+
+static void
+sfc_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+ uint32_t proxy_handle;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+
+ if (efx_mcdi_get_proxy_handle(sa->nic, emrp, &proxy_handle) == 0) {
+ /*
+ * Authorization is required for the MCDI request;
+ * wait for an MCDI proxy response event to bring
+ * a non-zero proxy handle (should be the same as
+ * the value obtained above) and operation status
+ */
+ sfc_mcdi_poll(sa, B_TRUE);
+
+ if ((mcdi->proxy_handle != 0) &&
+ (mcdi->proxy_handle != proxy_handle)) {
+ sfc_err(sa, "Unexpected MCDI proxy event");
+ emrp->emr_rc = EFAULT;
+ } else if (mcdi->proxy_result == 0) {
+ /*
+ * Authorization succeeded; re-issue the original
+ * request and poll for an ordinary MCDI response
+ */
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+ } else {
+ emrp->emr_rc = mcdi->proxy_result;
+ sfc_err(sa, "MCDI proxy authorization failed "
+ "(handle=%08x, result=%d)",
+ proxy_handle, mcdi->proxy_result);
+ }
+ }
+
+ rte_spinlock_unlock(&mcdi->lock);
+}
+
+static void
+sfc_mcdi_ev_cpl(void *arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi __rte_unused;
+
+ mcdi = &sa->mcdi;
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ /* MCDI is polled, completions are not expected */
+ SFC_ASSERT(0);
+}
+
+static void
+sfc_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+
+ sfc_warn(sa, "MC %s",
+ (eme == EFX_MCDI_EXCEPTION_MC_REBOOT) ? "REBOOT" :
+ (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT) ? "BADASSERT" : "UNKNOWN");
+
+ sfc_panic(sa, "MCDI exceptions handling is not implemented\n");
+}
+
+#define SFC_MCDI_LOG_BUF_SIZE 128
+
+static size_t
+sfc_mcdi_do_log(const struct sfc_adapter *sa,
+ char *buffer, void *data, size_t data_size,
+ size_t pfxsize, size_t position)
+{
+ uint32_t *words = data;
+ /* Space separator plus 2 characters per byte */
+ const size_t word_str_space = 1 + 2 * sizeof(*words);
+ size_t i;
+
+ for (i = 0; i < data_size; i += sizeof(*words)) {
+ if (position + word_str_space >=
+ SFC_MCDI_LOG_BUF_SIZE) {
+ /* Flush the buffer when SFC_MCDI_LOG_BUF_SIZE is reached, with
+ * a trailing backslash as required by netlogdecode.
+ */
+ buffer[position] = '\0';
+ sfc_info(sa, "%s \\", buffer);
+ /* Preserve prefix for the next log message */
+ position = pfxsize;
+ }
+ position += snprintf(buffer + position,
+ SFC_MCDI_LOG_BUF_SIZE - position,
+ " %08x", *words);
+ words++;
+ }
+ return position;
+}
+
+static void
+sfc_mcdi_logger(void *arg, efx_log_msg_t type,
+ void *header, size_t header_size,
+ void *data, size_t data_size)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ char buffer[SFC_MCDI_LOG_BUF_SIZE];
+ size_t pfxsize;
+ size_t start;
+
+ if (!sa->mcdi.logging)
+ return;
+
+ /* The format, including the prefix added by sfc_info(), is the
+ * one consumed by the Solarflare netlogdecode tool.
+ */
+ pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:",
+ type == EFX_LOG_MCDI_REQUEST ? "REQ" :
+ type == EFX_LOG_MCDI_RESPONSE ? "RESP" : "???");
+ start = sfc_mcdi_do_log(sa, buffer, header, header_size,
+ pfxsize, pfxsize);
+ start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start);
+ if (start != pfxsize) {
+ buffer[start] = '\0';
+ sfc_info(sa, "%s", buffer);
+ }
+}
+
+static void
+sfc_mcdi_ev_proxy_response(void *arg, uint32_t handle, efx_rc_t result)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = handle;
+ mcdi->proxy_result = result;
+}
+
+int
+sfc_mcdi_init(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ size_t max_msg_size;
+ efx_mcdi_transport_t *emtp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED);
+
+ rte_spinlock_init(&mcdi->lock);
+
+ mcdi->state = SFC_MCDI_INITIALIZED;
+
+ max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
+ rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id,
+ &mcdi->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ /* Convert a negative error to the positive value used in the driver */
+ rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
+ sfc_kvarg_bool_handler, &mcdi->logging);
+ if (rc != 0)
+ goto fail_kvargs_process;
+
+ emtp = &mcdi->transport;
+ emtp->emt_context = sa;
+ emtp->emt_dma_mem = &mcdi->mem;
+ emtp->emt_execute = sfc_mcdi_execute;
+ emtp->emt_ev_cpl = sfc_mcdi_ev_cpl;
+ emtp->emt_exception = sfc_mcdi_exception;
+ emtp->emt_logger = sfc_mcdi_logger;
+ emtp->emt_ev_proxy_response = sfc_mcdi_ev_proxy_response;
+
+ sfc_log_init(sa, "init MCDI");
+ rc = efx_mcdi_init(sa->nic, emtp);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ return 0;
+
+fail_mcdi_init:
+ memset(emtp, 0, sizeof(*emtp));
+
+fail_kvargs_process:
+ sfc_dma_free(sa, &mcdi->mem);
+
+fail_dma_alloc:
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+ return rc;
+}
+
+void
+sfc_mcdi_fini(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ efx_mcdi_transport_t *emtp;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+ emtp = &mcdi->transport;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+
+ sfc_log_init(sa, "fini MCDI");
+ efx_mcdi_fini(sa->nic);
+ memset(emtp, 0, sizeof(*emtp));
+
+ rte_spinlock_unlock(&mcdi->lock);
+
+ sfc_dma_free(sa, &mcdi->mem);
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_port.c b/src/seastar/dpdk/drivers/net/sfc/sfc_port.c
new file mode 100644
index 00000000..ee96bcde
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_port.c
@@ -0,0 +1,475 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+
+/** Default MAC statistics update period is 1 second */
+#define SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF MS_PER_S
+
+/** The number of microseconds to sleep on each attempt to get a statistics update */
+#define SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US 10
+
+/** The number of attempts to await arrival of freshly generated statistics */
+#define SFC_MAC_STATS_UPDATE_NB_ATTEMPTS 50
+
+/**
+ * Update MAC statistics in the buffer.
+ *
+ * @param sa Adapter
+ *
+ * @return Status code
+ * @retval 0 Success
+ * @retval EAGAIN Try again
+ * @retval ENOMEM Memory allocation failure
+ */
+int
+sfc_port_update_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ efsys_mem_t *esmp = &port->mac_stats_dma_mem;
+ uint32_t *genp = NULL;
+ uint32_t gen_old;
+ unsigned int nb_attempts = 0;
+ int rc;
+
+ SFC_ASSERT(rte_spinlock_is_locked(&port->mac_stats_lock));
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return EINVAL;
+
+ /*
+ * If periodic statistics DMA is disabled or not supported,
+ * make a manual request, rate-limited by the update period
+ * timer when one is configured
+ */
+ if (!port->mac_stats_periodic_dma_supported ||
+ (port->mac_stats_update_period_ms == 0)) {
+ if (port->mac_stats_update_period_ms != 0) {
+ uint64_t timestamp = sfc_get_system_msecs();
+
+ if ((timestamp -
+ port->mac_stats_last_request_timestamp) <
+ port->mac_stats_update_period_ms)
+ return 0;
+
+ port->mac_stats_last_request_timestamp = timestamp;
+ }
+
+ rc = efx_mac_stats_upload(sa->nic, esmp);
+ if (rc != 0)
+ return rc;
+
+ genp = &port->mac_stats_update_generation;
+ gen_old = *genp;
+ }
+
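+ /*
+ * In the manual request case, retry until the generation count
+ * changes, i.e. until freshly DMA'd statistics arrive in the buffer
+ */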
+ do {
+ if (nb_attempts > 0)
+ rte_delay_us(SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US);
+
+ rc = efx_mac_stats_update(sa->nic, esmp,
+ port->mac_stats_buf, genp);
+ if (rc != 0)
+ return rc;
+
+ } while ((genp != NULL) && (*genp == gen_old) &&
+ (++nb_attempts < SFC_MAC_STATS_UPDATE_NB_ATTEMPTS));
+
+ return 0;
+}
+
+int
+sfc_port_reset_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+ rc = efx_mac_stats_clear(sa->nic);
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return rc;
+}
+
+static int
+sfc_port_init_dev_link(struct sfc_adapter *sa)
+{
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ int rc;
+ efx_link_mode_t link_mode;
+ struct rte_eth_link current_link;
+
+ rc = efx_port_poll(sa->nic, &link_mode);
+ if (rc != 0)
+ return rc;
+
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
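+ /*
+ * The link structure fits into a 64-bit word, so the current link
+ * state can be published with a single atomic write
+ */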
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+ rte_atomic64_set((rte_atomic64_t *)dev_link,
+ *(uint64_t *)&current_link);
+
+ return 0;
+}
+
+int
+sfc_port_start(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+ uint32_t phy_adv_cap;
+ const uint32_t phy_pause_caps =
+ ((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
+
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "init filters");
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ sfc_log_init(sa, "init port");
+ rc = efx_port_init(sa->nic);
+ if (rc != 0)
+ goto fail_port_init;
+
+ sfc_log_init(sa, "set flow control to %#x autoneg=%u",
+ port->flow_ctrl, port->flow_ctrl_autoneg);
+ rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
+ port->flow_ctrl_autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+
+ /* Preserve pause capabilities set by above efx_mac_fcntl_set() */
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_CURRENT, &phy_adv_cap);
+ SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
+ phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);
+
+ sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
+ rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
+ if (rc != 0)
+ goto fail_phy_adv_cap_set;
+
+ sfc_log_init(sa, "set MAC PDU %u", (unsigned int)port->pdu);
+ rc = efx_mac_pdu_set(sa->nic, port->pdu);
+ if (rc != 0)
+ goto fail_mac_pdu_set;
+
+ sfc_log_init(sa, "set MAC address");
+ rc = efx_mac_addr_set(sa->nic,
+ sa->eth_dev->data->mac_addrs[0].addr_bytes);
+ if (rc != 0)
+ goto fail_mac_addr_set;
+
+ sfc_log_init(sa, "set MAC filters");
+ port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
+ B_TRUE : B_FALSE;
+ port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
+ B_TRUE : B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ goto fail_mac_filter_set;
+
+ sfc_log_init(sa, "set multicast address list");
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ goto fail_mcast_address_list_set;
+
+ if (port->mac_stats_reset_pending) {
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "statistics reset failed (requested "
+ "before the port was started)");
+
+ port->mac_stats_reset_pending = B_FALSE;
+ }
+
+ efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
+ sizeof(port->mac_stats_mask));
+
+ port->mac_stats_update_generation = 0;
+
+ if (port->mac_stats_update_period_ms != 0) {
+ /*
+ * Update MAC stats using periodic DMA;
+ * any positive update interval different from
+ * 1000 ms can be set only on SFN8xxx provided
+ * that FW version is 6.2.1.1033 or higher
+ */
+ sfc_log_init(sa, "request MAC stats DMA'ing");
+ rc = efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ port->mac_stats_update_period_ms,
+ B_FALSE);
+ if (rc == 0) {
+ port->mac_stats_periodic_dma_supported = B_TRUE;
+ } else if (rc == EOPNOTSUPP) {
+ port->mac_stats_periodic_dma_supported = B_FALSE;
+ port->mac_stats_last_request_timestamp = 0;
+ } else {
+ goto fail_mac_stats_periodic;
+ }
+ }
+
+ sfc_log_init(sa, "disable MAC drain");
+ rc = efx_mac_drain(sa->nic, B_FALSE);
+ if (rc != 0)
+ goto fail_mac_drain;
+
+ /* Synchronize link status knowledge */
+ rc = sfc_port_init_dev_link(sa);
+ if (rc != 0)
+ goto fail_port_init_dev_link;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_port_init_dev_link:
+ (void)efx_mac_drain(sa->nic, B_TRUE);
+
+fail_mac_drain:
+ (void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ 0, B_FALSE);
+
+fail_mac_stats_periodic:
+fail_mcast_address_list_set:
+fail_mac_filter_set:
+fail_mac_addr_set:
+fail_mac_pdu_set:
+fail_phy_adv_cap_set:
+fail_mac_fcntl_set:
+ efx_port_fini(sa->nic);
+
+fail_port_init:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ efx_mac_drain(sa->nic, B_TRUE);
+
+ (void)efx_mac_stats_periodic(sa->nic, &sa->port.mac_stats_dma_mem,
+ 0, B_FALSE);
+
+ efx_port_fini(sa->nic);
+ efx_filter_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_port_configure(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
+ struct sfc_port *port = &sa->port;
+
+ sfc_log_init(sa, "entry");
+
+ if (dev_data->dev_conf.rxmode.jumbo_frame)
+ port->pdu = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+ else
+ port->pdu = EFX_MAC_PDU(dev_data->mtu);
+
+ return 0;
+}
+
+void
+sfc_port_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+}
+
+int
+sfc_port_attach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ long kvarg_stats_update_period_ms;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_PERM, &port->phy_adv_cap_mask);
+
+ /* Enable flow control by default */
+ port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ port->flow_ctrl_autoneg = B_TRUE;
+
+ port->max_mcast_addrs = EFX_MAC_MULTICAST_LIST_MAX;
+ port->nb_mcast_addrs = 0;
+ port->mcast_addrs = rte_calloc_socket("mcast_addr_list_buf",
+ port->max_mcast_addrs,
+ EFX_MAC_ADDR_LEN, 0,
+ sa->socket_id);
+ if (port->mcast_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mcast_addr_list_buf_alloc;
+ }
+
+ rte_spinlock_init(&port->mac_stats_lock);
+
+ rc = ENOMEM;
+ port->mac_stats_buf = rte_calloc_socket("mac_stats_buf", EFX_MAC_NSTATS,
+ sizeof(uint64_t), 0,
+ sa->socket_id);
+ if (port->mac_stats_buf == NULL)
+ goto fail_mac_stats_buf_alloc;
+
+ rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE,
+ sa->socket_id, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_dma_alloc;
+
+ port->mac_stats_reset_pending = B_FALSE;
+
+ kvarg_stats_update_period_ms = SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ sfc_kvarg_long_handler,
+ &kvarg_stats_update_period_ms);
+ if ((rc == 0) &&
+ ((kvarg_stats_update_period_ms < 0) ||
+ (kvarg_stats_update_period_ms > UINT16_MAX))) {
+ sfc_err(sa, "wrong '" SFC_KVARG_STATS_UPDATE_PERIOD_MS "' "
+ "was set (%ld);", kvarg_stats_update_period_ms);
+ sfc_err(sa, "it must not be less than 0 "
+ "or greater than %" PRIu16, UINT16_MAX);
+ rc = EINVAL;
+ goto fail_kvarg_stats_update_period_ms;
+ } else if (rc != 0) {
+ goto fail_kvarg_stats_update_period_ms;
+ }
+
+ port->mac_stats_update_period_ms = kvarg_stats_update_period_ms;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_kvarg_stats_update_period_ms:
+fail_mac_stats_dma_alloc:
+ rte_free(port->mac_stats_buf);
+fail_mac_stats_buf_alloc:
+fail_mcast_addr_list_buf_alloc:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_detach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_dma_free(sa, &port->mac_stats_dma_mem);
+ rte_free(port->mac_stats_buf);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_set_rx_mode(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rc = efx_mac_filter_set(sa->nic, port->promisc, B_TRUE,
+ port->promisc || port->allmulti, B_TRUE);
+
+ return rc;
+}
+
+void
+sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info)
+{
+ SFC_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ memset(link_info, 0, sizeof(*link_info));
+ if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
+ link_info->link_status = ETH_LINK_DOWN;
+ else
+ link_info->link_status = ETH_LINK_UP;
+
+ switch (link_mode) {
+ case EFX_LINK_10HDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_10FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_100HDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_100FDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_1000HDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_1000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_10000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_40000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_40G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ default:
+ SFC_ASSERT(B_FALSE);
+ /* FALLTHROUGH */
+ case EFX_LINK_UNKNOWN:
+ case EFX_LINK_DOWN:
+ link_info->link_speed = ETH_SPEED_NUM_NONE;
+ link_info->link_duplex = 0;
+ break;
+ }
+
+ link_info->link_autoneg = ETH_LINK_AUTONEG;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_rx.c b/src/seastar/dpdk/drivers/net/sfc/sfc_rx.c
new file mode 100644
index 00000000..2ecd6f26
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_rx.c
@@ -0,0 +1,1327 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mempool.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_tweak.h"
+
+/*
+ * Maximum number of Rx queue flush attempt in the case of failure or
+ * flush timeout
+ */
+#define SFC_RX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for Rx
+ * queue flush done or failed events.
+ */
+#define SFC_RX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for Rx queue
+ * flush done or failed events. It defines Rx queue flush attempt timeout
+ * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
+ */
+#define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000)
+
+void
+sfc_rx_qflush_done(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSHED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+void
+sfc_rx_qflush_failed(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+static void
+sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
+{
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ efsys_dma_addr_t addr[RTE_DIM(objs)];
+ unsigned int added = rxq->added;
+ unsigned int id;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ uint16_t port_id = rxq->dp.dpq.port_id;
+
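+ /* Refill in bulks of SFC_RX_REFILL_BULK mbufs while free ring space
+ * is at least the refill threshold
+ */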
+ free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
+ (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ id = added & rxq->ptr_mask;
+ do {
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ /*
+ * Incrementing the counter from different contexts is hardly
+ * safe, but all PMDs do it.
+ */
+ rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
+ RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0; i < RTE_DIM(objs);
+ ++i, id = (id + 1) & rxq->ptr_mask) {
+ m = objs[i];
+
+ rxd = &rxq->sw_desc[id];
+ rxd->mbuf = m;
+
+ SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ SFC_ASSERT(m->next == NULL);
+ SFC_ASSERT(m->nb_segs == 1);
+ m->port = port_id;
+
+ addr[i] = rte_pktmbuf_mtophys(m);
+ }
+
+ efx_rx_qpost(rxq->common, addr, rxq->buf_size,
+ RTE_DIM(objs), rxq->completed, added);
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(added != rxq->added);
+ rxq->added = added;
+ efx_rx_qpush(rxq->common, added, &rxq->pushed);
+}
+
+static uint64_t
+sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
+{
+ uint64_t mbuf_flags = 0;
+
+ switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
+ case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
+ mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case EFX_PKT_IPV4:
+ mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
+ PKT_RX_IP_CKSUM_UNKNOWN);
+ break;
+ }
+
+ switch ((desc_flags &
+ (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
+ case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
+ case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
+ mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case EFX_PKT_TCP:
+ case EFX_PKT_UDP:
+ mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
+ PKT_RX_L4_CKSUM_UNKNOWN);
+ break;
+ }
+
+ return mbuf_flags;
+}
+
+static uint32_t
+sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
+{
+ return RTE_PTYPE_L2_ETHER |
+ ((desc_flags & EFX_PKT_IPV4) ?
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_IPV6) ?
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
+ ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
+}
+
+static const uint32_t *
+sfc_efx_supported_ptypes_get(void)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
+static void
+sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
+ struct rte_mbuf *m)
+{
+#if EFSYS_OPT_RX_SCALE
+ uint8_t *mbuf_data;
+
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
+ return;
+
+ mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
+
+ if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
+ m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
+ EFX_RX_HASHALG_TOEPLITZ,
+ mbuf_data);
+
+ m->ol_flags |= PKT_RX_RSS_HASH;
+ }
+#endif
+}
+
+static uint16_t
+sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_rxq *dp_rxq = rx_queue;
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int completed;
+ unsigned int prefix_size = rxq->prefix_size;
+ unsigned int done_pkts = 0;
+ boolean_t discard_next = B_FALSE;
+ struct rte_mbuf *scatter_pkt = NULL;
+
+ if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ completed = rxq->completed;
+ while (completed != rxq->pending && done_pkts < nb_pkts) {
+ unsigned int id;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ unsigned int seg_len;
+ unsigned int desc_flags;
+
+ id = completed++ & rxq->ptr_mask;
+ rxd = &rxq->sw_desc[id];
+ m = rxd->mbuf;
+ desc_flags = rxd->flags;
+
+ if (discard_next)
+ goto discard;
+
+ if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
+ goto discard;
+
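+ /*
+ * Take the segment length from the Rx prefix pseudo-header when
+ * provided, otherwise derive it from the descriptor size
+ */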
+ if (desc_flags & EFX_PKT_PREFIX_LEN) {
+ uint16_t tmp_size;
+ int rc __rte_unused;
+
+ rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
+ rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
+ SFC_ASSERT(rc == 0);
+ seg_len = tmp_size;
+ } else {
+ seg_len = rxd->size - prefix_size;
+ }
+
+ rte_pktmbuf_data_len(m) = seg_len;
+ rte_pktmbuf_pkt_len(m) = seg_len;
+
+ if (scatter_pkt != NULL) {
+ if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
+ rte_pktmbuf_free(scatter_pkt);
+ goto discard;
+ }
+ /* The packet to deliver */
+ m = scatter_pkt;
+ }
+
+ if (desc_flags & EFX_PKT_CONT) {
+ /* The packet is scattered, more fragments to come */
+ scatter_pkt = m;
+ /* Further fragments have no prefix */
+ prefix_size = 0;
+ continue;
+ }
+
+ /* Scattered packet is done */
+ scatter_pkt = NULL;
+ /* The first fragment of the packet has prefix */
+ prefix_size = rxq->prefix_size;
+
+ m->ol_flags =
+ sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
+ m->packet_type =
+ sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
+
+ /*
+ * Extract RSS hash from the packet prefix and
+ * set the corresponding field (if needed and possible)
+ */
+ sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
+
+ m->data_off += prefix_size;
+
+ *rx_pkts++ = m;
+ done_pkts++;
+ continue;
+
+discard:
+ discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ rxd->mbuf = NULL;
+ }
+
+ /* pending is only moved when the entire packet is received */
+ SFC_ASSERT(scatter_pkt == NULL);
+
+ rxq->completed = completed;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ return done_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
+static unsigned int
+sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ return rxq->pending - rxq->completed;
+}
+
+struct sfc_rxq *
+sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
+{
+ const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->rxq_count);
+ rxq = sa->rxq_info[dpq->queue_id].rxq;
+
+ SFC_ASSERT(rxq != NULL);
+ return rxq;
+}
+
+static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
+static int
+sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_efx_rxq *rxq;
+ int rc;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
+ info->rxq_entries,
+ sizeof(*rxq->sw_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_desc == NULL)
+ goto fail_desc_alloc;
+
+ /* efx datapath is bound to efx control path */
+ rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->batch_max = info->batch_max;
+ rxq->prefix_size = info->prefix_size;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
+static void
+sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_desc);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
+static int
+sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int evq_read_ptr)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->common = crxq->common;
+
+ rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
+static void
+sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
+
+ /* The libefx-based datapath is bound to the libefx-based PMD and
+ * uses the event queue structure directly, so there is no need to
+ * return the EvQ read pointer.
+ */
+}
+
+static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
+static void
+sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_desc[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ /* Packed stream relies on 0 in inactive SW desc.
+ * Rx queue stop is not performance critical, so
+ * there is no harm in doing it always.
+ */
+ rxd->flags = 0;
+ rxd->size = 0;
+ }
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_rx sfc_efx_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_RX_FEAT_SCATTER,
+ .qcreate = sfc_efx_rx_qcreate,
+ .qdestroy = sfc_efx_rx_qdestroy,
+ .qstart = sfc_efx_rx_qstart,
+ .qstop = sfc_efx_rx_qstop,
+ .qpurge = sfc_efx_rx_qpurge,
+ .supported_ptypes_get = sfc_efx_supported_ptypes_get,
+ .qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .pkt_burst = sfc_efx_recv_pkts,
+};
+
+unsigned int
+sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq = sa->rxq_info[sw_index].rxq;
+
+ if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
+ return 0;
+
+ return sa->dp_rx->qdesc_npending(rxq->dp);
+}
+
+int
+sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
+{
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
+}
+
+static void
+sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+
+ rxq = sa->rxq_info[sw_index].rxq;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /*
+ * Retry Rx queue flushing in the case of flush failure or
+ * timeout. In the worst case it can delay for 6 seconds.
+ */
+ for (retry_count = 0;
+ ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ if (efx_rx_qflush(rxq->common) != 0) {
+ rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ break;
+ }
+ rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
+ rxq->state |= SFC_RXQ_FLUSHING;
+
+ /*
+ * Wait for the Rx queue flush done or failed event for at
+ * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and no more
+ * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
+ * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(rxq->evq);
+ } while ((rxq->state & SFC_RXQ_FLUSHING) &&
+ (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
+
+ if (rxq->state & SFC_RXQ_FLUSHING)
+ sfc_err(sa, "RxQ %u flush timed out", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSH_FAILED)
+ sfc_err(sa, "RxQ %u flush failed", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSHED)
+ sfc_info(sa, "RxQ %u flushed", sw_index);
+ }
+
+ sa->dp_rx->qpurge(rxq->dp);
+}
+
+static int
+sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
+{
+ boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ /*
+ * If promiscuous or all-multicast mode has been requested, setting
+ * the filter for the default Rx queue might fail, in particular when
+ * running over a PCI function which is not a member of the
+ * corresponding privilege groups; if this occurs, a few iterations
+ * will be made to repeat this step without the promiscuous and
+ * all-multicast flags set
+ */
+retry:
+ rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
+ if (rc == 0)
+ return 0;
+ else if (rc != EOPNOTSUPP)
+ return rc;
+
+ if (port->promisc) {
+ sfc_warn(sa, "promiscuous mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "promiscuous mode will be disabled");
+
+ port->promisc = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ if (port->allmulti) {
+ sfc_warn(sa, "all-multicast mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "all-multicast mode will be disabled");
+
+ port->allmulti = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ return rc;
+}
+
+int
+sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ evq = rxq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ &rxq->mem, rxq_info->entries,
+ 0 /* not used on EF10 */, evq->common,
+ &rxq->common);
+ if (rc != 0)
+ goto fail_rx_qcreate;
+
+ efx_rx_qenable(rxq->common);
+
+ rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ rxq->state |= SFC_RXQ_STARTED;
+
+ if (sw_index == 0) {
+ rc = sfc_rx_default_rxq_set_filter(sa, rxq);
+ if (rc != 0)
+ goto fail_mac_filter_default_rxq_set;
+ }
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_mac_filter_default_rxq_set:
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+fail_dp_qstart:
+ sfc_rx_qflush(sa, sw_index);
+
+fail_rx_qcreate:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+
+ if (rxq->state == SFC_RXQ_INITIALIZED)
+ return;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+ if (sw_index == 0)
+ efx_mac_filter_default_rxq_clear(sa->nic);
+
+ sfc_rx_qflush(sa, sw_index);
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ efx_rx_qdestroy(rxq->common);
+
+ sfc_ev_qstop(rxq->evq);
+}
+
+static int
+sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
+ const struct rte_eth_rxconf *rx_conf)
+{
+ const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
+ int rc = 0;
+
+ if (rx_conf->rx_thresh.pthresh != 0 ||
+ rx_conf->rx_thresh.hthresh != 0 ||
+ rx_conf->rx_thresh.wthresh != 0) {
+ sfc_err(sa,
+ "RxQ prefetch/host/writeback thresholds are not supported");
+ rc = EINVAL;
+ }
+
+ if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
+ sfc_err(sa,
+ "RxQ free threshold too large: %u vs maximum %u",
+ rx_conf->rx_free_thresh, rx_free_thresh_max);
+ rc = EINVAL;
+ }
+
+ if (rx_conf->rx_drop_en == 0) {
+ sfc_err(sa, "RxQ drop disable is not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
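+/*
+ * Compute the Rx buffer data start alignment guaranteed by the mbuf
+ * layout (cache-line-aligned mbuf start plus the fixed data offset)
+ */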
+static unsigned int
+sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
+{
+ uint32_t data_off;
+ uint32_t order;
+
+ /* The mbuf object itself is always cache line aligned */
+ order = rte_bsf32(RTE_CACHE_LINE_SIZE);
+
+ /* Data offset from mbuf object start */
+ data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
+ RTE_PKTMBUF_HEADROOM;
+
+ order = MIN(order, rte_bsf32(data_off));
+
+ return 1u << (order - 1);
+}
+
+static uint16_t
+sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
+ const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
+ uint16_t buf_size;
+ unsigned int buf_aligned;
+ unsigned int start_alignment;
+ unsigned int end_padding_alignment;
+
+ /* Below it is assumed that both alignments are power of 2 */
+ SFC_ASSERT(rte_is_power_of_2(nic_align_start));
+ SFC_ASSERT(rte_is_power_of_2(nic_align_end));
+
+ /*
+ * The mbuf is always cache line aligned; double-check
+ * that it meets the Rx buffer start alignment requirements.
+ */
+
+ /* Start from mbuf pool data room size */
+ buf_size = rte_pktmbuf_data_room_size(mb_pool);
+
+ /* Remove headroom */
+ if (buf_size <= RTE_PKTMBUF_HEADROOM) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
+ mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
+ return 0;
+ }
+ buf_size -= RTE_PKTMBUF_HEADROOM;
+
+ /* Calculate guaranteed data start alignment */
+ buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
+
+ /* Reserve space for start alignment */
+ if (buf_aligned < nic_align_start) {
+ start_alignment = nic_align_start - buf_aligned;
+ if (buf_size <= start_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment);
+ return 0;
+ }
+ buf_aligned = nic_align_start;
+ buf_size -= start_alignment;
+ } else {
+ start_alignment = 0;
+ }
+
+ /* Make sure that end padding does not write beyond the buffer */
+ if (buf_aligned < nic_align_end) {
+ /*
+ * Estimate the space which can be lost. If the guaranteed
+ * buffer size is odd, the lost space is (nic_align_end - 1);
+ * a more accurate formula is used below.
+ */
+ end_padding_alignment = nic_align_end -
+ MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
+ if (buf_size <= end_padding_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment,
+ end_padding_alignment);
+ return 0;
+ }
+ buf_size -= end_padding_alignment;
+ } else {
+ /*
+ * Start is aligned the same or better than end,
+ * just align length.
+ */
+ buf_size = P2ALIGN(buf_size, nic_align_end);
+ }
+
+ return buf_size;
+}
+
+int
+sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc;
+ uint16_t buf_size;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_evq *evq;
+ struct sfc_rxq *rxq;
+ struct sfc_dp_rx_qcreate_info info;
+
+ rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
+ if (buf_size == 0) {
+ sfc_err(sa, "RxQ %u mbuf pool object size is too small",
+ sw_index);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
+ !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
+ sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
+ "object size is too small", sw_index);
+ sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+ "PDU size %u plus Rx prefix %u bytes",
+ sw_index, buf_size, (unsigned int)sa->port.pdu,
+ encp->enc_rx_prefix_size);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq_info = &sa->rxq_info[sw_index];
+
+ SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
+ rxq_info->entries = nb_rx_desc;
+ rxq_info->type =
+ sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
+ EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
+ rxq_info->entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ rxq_info->rxq = rxq;
+
+ rxq->evq = evq;
+ rxq->hw_index = sw_index;
+ rxq->refill_threshold =
+ RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
+ rxq->refill_mb_pool = mb_pool;
+
+ rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
+ socket_id, &rxq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.refill_mb_pool = rxq->refill_mb_pool;
+ info.refill_threshold = rxq->refill_threshold;
+ info.buf_size = buf_size;
+ info.batch_max = encp->enc_rx_batch_max;
+ info.prefix_size = encp->enc_rx_prefix_size;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
+ info.flags |= SFC_RXQ_FLAG_RSS_HASH;
+#endif
+
+ info.rxq_entries = rxq_info->entries;
+ info.rxq_hw_ring = rxq->mem.esm_base;
+ info.evq_entries = rxq_info->entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = rxq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+
+ rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &rxq->dp);
+ if (rc != 0)
+ goto fail_dp_rx_qcreate;
+
+ evq->dp_rxq = rxq->dp;
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_rx_qcreate:
+ sfc_dma_free(sa, &rxq->mem);
+
+fail_dma_alloc:
+ rxq_info->rxq = NULL;
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ rxq_info->entries = 0;
+
+fail_bad_conf:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ sa->dp_rx->qdestroy(rxq->dp);
+ rxq->dp = NULL;
+
+ rxq_info->rxq = NULL;
+ rxq_info->entries = 0;
+
+ sfc_dma_free(sa, &rxq->mem);
+
+ sfc_ev_qfini(rxq->evq);
+ rxq->evq = NULL;
+
+ rte_free(rxq);
+}
+
+#if EFSYS_OPT_RX_SCALE
+efx_rx_hash_type_t
+sfc_rte_to_efx_hash_type(uint64_t rss_hf)
+{
+ efx_rx_hash_type_t efx_hash_types = 0;
+
+ if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
+ efx_hash_types |= EFX_RX_HASH_IPV4;
+
+ if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+ efx_hash_types |= EFX_RX_HASH_TCPIPV4;
+
+ if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
+ efx_hash_types |= EFX_RX_HASH_IPV6;
+
+ if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
+ efx_hash_types |= EFX_RX_HASH_TCPIPV6;
+
+ return efx_hash_types;
+}
+
+uint64_t
+sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
+{
+ uint64_t rss_hf = 0;
+
+ if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
+ rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER);
+
+ if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+
+ if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
+ rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
+
+ if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
+ rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
+
+ return rss_hf;
+}
+#endif
+
+static int
+sfc_rx_rss_config(struct sfc_adapter *sa)
+{
+ int rc = 0;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->rss_channels > 0) {
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ sa->rss_hash_types, B_TRUE);
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
+ sizeof(sa->rss_tbl));
+ }
+
+finish:
+#endif
+ return rc;
+}
+
+int
+sfc_rx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = sfc_rx_rss_config(sa);
+ if (rc != 0)
+ goto fail_rss_config;
+
+ for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
+ if ((!sa->rxq_info[sw_index].deferred_start ||
+ sa->rxq_info[sw_index].deferred_started)) {
+ rc = sfc_rx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_rx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_rx_qstart:
+ while (sw_index-- > 0)
+ sfc_rx_qstop(sa, sw_index);
+
+fail_rss_config:
+ efx_rx_fini(sa->nic);
+
+fail_rx_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (sw_index-- > 0) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qstop(sa, sw_index);
+ }
+
+ efx_rx_fini(sa->nic);
+}
+
+static int
+sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
+ unsigned int max_entries;
+
+ max_entries = EFX_RXQ_MAXNDESCS;
+ SFC_ASSERT(rte_is_power_of_2(max_entries));
+
+ rxq_info->max_entries = max_entries;
+
+ return 0;
+}
+
+static int
+sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
+{
+ int rc = 0;
+
+ switch (rxmode->mq_mode) {
+ case ETH_MQ_RX_NONE:
+ /* No special checks are required */
+ break;
+#if EFSYS_OPT_RX_SCALE
+ case ETH_MQ_RX_RSS:
+ if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ sfc_err(sa, "RSS is not available");
+ rc = EINVAL;
+ }
+ break;
+#endif
+ default:
+ sfc_err(sa, "Rx multi-queue mode %u not supported",
+ rxmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ if (rxmode->header_split) {
+ sfc_err(sa, "Header split on Rx not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_filter) {
+ sfc_err(sa, "HW VLAN filtering not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_strip) {
+ sfc_err(sa, "HW VLAN stripping not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_extend) {
+ sfc_err(sa,
+ "Q-in-Q HW VLAN stripping not supported");
+ rc = EINVAL;
+ }
+
+ if (!rxmode->hw_strip_crc) {
+ sfc_warn(sa,
+ "FCS stripping control not supported - always stripped");
+ rxmode->hw_strip_crc = 1;
+ }
+
+ if (rxmode->enable_scatter &&
+ (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
+ sfc_err(sa, "Rx scatter not supported by %s datapath",
+ sa->dp_rx->dp.name);
+ rc = EINVAL;
+ }
+
+ if (rxmode->enable_lro) {
+ sfc_err(sa, "LRO not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (--sw_index >= (int)nb_rx_queues) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qfini(sa, sw_index);
+ }
+
+ sa->rxq_count = nb_rx_queues;
+}
+
+/**
+ * Initialize Rx subsystem.
+ *
+ * Called at device (re)configuration stage when number of receive queues is
+ * specified together with other device level receive configuration.
+ *
+ * It should be used to allocate NUMA-unaware resources.
+ */
+int
+sfc_rx_configure(struct sfc_adapter *sa)
+{
+ struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+ unsigned int sw_index;
+ int rc;
+
+ sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
+ nb_rx_queues, sa->rxq_count);
+
+ rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_rx_queues == sa->rxq_count)
+ goto done;
+
+ if (sa->rxq_info == NULL) {
+ rc = ENOMEM;
+ sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+ sizeof(sa->rxq_info[0]), 0,
+ sa->socket_id);
+ if (sa->rxq_info == NULL)
+ goto fail_rxqs_alloc;
+ } else {
+ struct sfc_rxq_info *new_rxq_info;
+
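+ /*
+ * When shrinking, finalize queues beyond the new count before
+ * the info array is reallocated
+ */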
+ if (nb_rx_queues < sa->rxq_count)
+ sfc_rx_fini_queues(sa, nb_rx_queues);
+
+ rc = ENOMEM;
+ new_rxq_info =
+ rte_realloc(sa->rxq_info,
+ nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
+ if (new_rxq_info == NULL && nb_rx_queues > 0)
+ goto fail_rxqs_realloc;
+
+ sa->rxq_info = new_rxq_info;
+ if (nb_rx_queues > sa->rxq_count)
+ memset(&sa->rxq_info[sa->rxq_count], 0,
+ (nb_rx_queues - sa->rxq_count) *
+ sizeof(sa->rxq_info[0]));
+ }
+
+ while (sa->rxq_count < nb_rx_queues) {
+ rc = sfc_rx_qinit_info(sa, sa->rxq_count);
+ if (rc != 0)
+ goto fail_rx_qinit_info;
+
+ sa->rxq_count++;
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+
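+ /* Spread Rx queues over the RSS indirection table round-robin */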
+ if (sa->rss_channels > 0) {
+ for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
+ sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
+ }
+#endif
+
+done:
+ return 0;
+
+fail_rx_qinit_info:
+fail_rxqs_realloc:
+fail_rxqs_alloc:
+ sfc_rx_close(sa);
+
+fail_check_mode:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+/**
+ * Shutdown Rx subsystem.
+ *
+ * Called at device close stage, for example, before device shutdown.
+ */
+void
+sfc_rx_close(struct sfc_adapter *sa)
+{
+ sfc_rx_fini_queues(sa, 0);
+
+ sa->rss_channels = 0;
+
+ rte_free(sa->rxq_info);
+ sa->rxq_info = NULL;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_rx.h b/src/seastar/dpdk/drivers/net/sfc/sfc_rx.h
new file mode 100644
index 00000000..9e6282ea
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_rx.h
@@ -0,0 +1,180 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_RX_H
+#define _SFC_RX_H
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc_dp_rx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Rx descriptor information associated with hardware Rx
+ * descriptor.
+ */
+struct sfc_efx_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+ unsigned int flags;
+ unsigned int size;
+};
+
+/** Receive queue state bits */
+enum sfc_rxq_state_bit {
+ SFC_RXQ_INITIALIZED_BIT = 0,
+#define SFC_RXQ_INITIALIZED (1 << SFC_RXQ_INITIALIZED_BIT)
+ SFC_RXQ_STARTED_BIT,
+#define SFC_RXQ_STARTED (1 << SFC_RXQ_STARTED_BIT)
+ SFC_RXQ_FLUSHING_BIT,
+#define SFC_RXQ_FLUSHING (1 << SFC_RXQ_FLUSHING_BIT)
+ SFC_RXQ_FLUSHED_BIT,
+#define SFC_RXQ_FLUSHED (1 << SFC_RXQ_FLUSHED_BIT)
+ SFC_RXQ_FLUSH_FAILED_BIT,
+#define SFC_RXQ_FLUSH_FAILED (1 << SFC_RXQ_FLUSH_FAILED_BIT)
+};
+
+/**
+ * Receive queue control information.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_rxq {
+ struct sfc_evq *evq;
+ efx_rxq_t *common;
+ efsys_mem_t mem;
+ unsigned int hw_index;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ struct sfc_dp_rxq *dp;
+ unsigned int state;
+};
+
+static inline unsigned int
+sfc_rxq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_rxq_sw_index(const struct sfc_rxq *rxq)
+{
+ return sfc_rxq_sw_index_by_hw_index(rxq->hw_index);
+}
+
+struct sfc_rxq *sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_rxq {
+ /* Used on data path */
+ struct sfc_evq *evq;
+ unsigned int flags;
+#define SFC_EFX_RXQ_FLAG_STARTED 0x1
+#define SFC_EFX_RXQ_FLAG_RUNNING 0x2
+#define SFC_EFX_RXQ_FLAG_RSS_HASH 0x4
+ unsigned int ptr_mask;
+ unsigned int pending;
+ unsigned int completed;
+ uint16_t batch_max;
+ uint16_t prefix_size;
+ struct sfc_efx_rx_sw_desc *sw_desc;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int pushed;
+ unsigned int refill_threshold;
+ uint16_t buf_size;
+ struct rte_mempool *refill_mb_pool;
+ efx_rxq_t *common;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_efx_rxq *
+sfc_efx_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_efx_rxq, dp);
+}
+
+/**
+ * Receive queue information used during setup/release only.
+ * Allocated on the same socket as adapter data.
+ */
+struct sfc_rxq_info {
+ unsigned int max_entries;
+ unsigned int entries;
+ efx_rxq_type_t type;
+ struct sfc_rxq *rxq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_rx_configure(struct sfc_adapter *sa);
+void sfc_rx_close(struct sfc_adapter *sa);
+int sfc_rx_start(struct sfc_adapter *sa);
+void sfc_rx_stop(struct sfc_adapter *sa);
+
+int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+
+void sfc_rx_qflush_done(struct sfc_rxq *rxq);
+void sfc_rx_qflush_failed(struct sfc_rxq *rxq);
+
+unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa,
+ unsigned int sw_index);
+int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset);
+
+#if EFSYS_OPT_RX_SCALE
+efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf);
+uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_RX_H */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_tso.c b/src/seastar/dpdk/drivers/net/sfc/sfc_tso.c
new file mode 100644
index 00000000..fb79d749
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_tso.c
@@ -0,0 +1,201 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_tx.h"
+#include "sfc_ev.h"
+
+/** Standard TSO header length */
+#define SFC_TSOH_STD_LEN 256
+
+/** The number of TSO option descriptors that precede the packet descriptors */
+#define SFC_TSO_OPDESCS_IDX_SHIFT 2
+
+int
+sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries, unsigned int socket_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
+ SFC_TSOH_STD_LEN,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (sw_ring[i].tsoh == NULL)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ return 0;
+
+fail_alloc_tsoh_objs:
+ while (i > 0)
+ rte_free(sw_ring[--i].tsoh);
+
+ return ENOMEM;
+}
+
+void
+sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ rte_free(sw_ring[i].tsoh);
+ sw_ring[i].tsoh = NULL;
+ }
+}
+
+static void
+sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
+ size_t *in_off, unsigned int idx, size_t bytes_left)
+{
+ struct rte_mbuf *m = *in_seg;
+ size_t bytes_to_copy = 0;
+ uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
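+ /*
+ * Copy the header, which may span several mbufs, into the
+ * contiguous TSO header buffer associated with the descriptor
+ */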
+ do {
+ bytes_to_copy = MIN(bytes_left, m->data_len);
+
+ rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
+ bytes_to_copy);
+
+ bytes_left -= bytes_to_copy;
+ tsoh += bytes_to_copy;
+
+ if (bytes_left > 0) {
+ m = m->next;
+ SFC_ASSERT(m != NULL);
+ }
+ } while (bytes_left > 0);
+
+ if (bytes_to_copy == m->data_len) {
+ *in_seg = m->next;
+ *in_off = 0;
+ } else {
+ *in_seg = m;
+ *in_off = bytes_to_copy;
+ }
+}
+
+int
+sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len)
+{
+ uint8_t *tsoh;
+ const struct tcp_hdr *th;
+ efsys_dma_addr_t header_paddr;
+ uint16_t packet_id;
+ uint32_t sent_seq;
+ struct rte_mbuf *m = *in_seg;
+ size_t nh_off = m->l2_len; /* IP header offset */
+ size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
+ size_t header_len = m->l2_len + m->l3_len + m->l4_len;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
+
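+ /*
+ * Skip the ring slots that will be occupied by the TSO option
+ * descriptors preceding the packet descriptors
+ */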
+ idx += SFC_TSO_OPDESCS_IDX_SHIFT;
+
+ /* Packets whose headers are too big should be discarded */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
+ /*
+ * The TCP header must start no later than the NIC-reported offset
+ * limit (208 bytes) into the frame; if it starts later than this,
+ * the NIC won't realise it's a TCP packet and TSO edits won't be applied.
+ */
+ if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
+ return EMSGSIZE;
+
+ header_paddr = rte_pktmbuf_mtophys(m);
+
+ /*
+ * Sometimes headers may be split across multiple mbufs. In such cases
+ * we need to glue those pieces and store them in some temporary place.
+ * Also, packet headers must be contiguous in memory, so that
+ * they can be referred to with a single DMA descriptor. EF10 places
+ * no restrictions on DMA descriptor data crossing address boundaries.
+ */
+ if (m->data_len < header_len) {
+ sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
+ header_len);
+ tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
+ header_paddr = rte_malloc_virt2phy((void *)tsoh);
+ } else {
+ if (m->data_len == header_len) {
+ *in_off = 0;
+ *in_seg = m->next;
+ } else {
+ *in_off = header_len;
+ }
+
+ tsoh = rte_pktmbuf_mtod(m, uint8_t *);
+ }
+
+ /* Handle IP header */
+ if (m->ol_flags & PKT_TX_IPV4) {
+ const struct ipv4_hdr *iphe4;
+
+ iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
+ rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
+ packet_id = rte_be_to_cpu_16(packet_id);
+ } else if (m->ol_flags & PKT_TX_IPV6) {
+ packet_id = 0;
+ } else {
+ return EINVAL;
+ }
+
+ /* Handle TCP header */
+ th = (const struct tcp_hdr *)(tsoh + tcph_off);
+
+ rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
+ sent_seq = rte_be_to_cpu_32(sent_seq);
+
+ efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
+ *pend, EFX_TX_FATSOV2_OPT_NDESCS);
+
+ *pend += EFX_TX_FATSOV2_OPT_NDESCS;
+ *pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;
+
+ efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
+ B_FALSE, (*pend)++);
+ (*pkt_descs)++;
+ *pkt_len -= header_len;
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_tweak.h b/src/seastar/dpdk/drivers/net/sfc/sfc_tweak.h
new file mode 100644
index 00000000..4ef7fc8b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_tweak.h
@@ -0,0 +1,56 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_TWEAK_H_
+#define _SFC_TWEAK_H_
+
+/*
+ * This header collects defines/constants that can be tweaked to improve
+ * the PMD performance characteristics depending on the use case or
+ * requirements (CPU load, packet rate, latency).
+ */
+
+/**
+ * Number of Rx descriptors in the bulk submitted on Rx ring refill.
+ */
+#define SFC_RX_REFILL_BULK (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+/**
+ * Make the transmit path reap at least once per burst;
+ * this improves cache locality because, thanks to a well-timed reap,
+ * the same mbufs may in certain cases be reused to send subsequent bursts.
+ */
+#define SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE 0
+
+/** Default free threshold follows recommendations from DPDK documentation */
+#define SFC_TX_DEFAULT_FREE_THRESH 32
+
+#endif /* _SFC_TWEAK_H_ */
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_tx.c b/src/seastar/dpdk/drivers/net/sfc/sfc_tx.c
new file mode 100644
index 00000000..b8581d14
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_tx.c
@@ -0,0 +1,992 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+
+/*
+ * Maximum number of TX queue flush attempts in case of
+ * failure or flush timeout
+ */
+#define SFC_TX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for TX
+ * queue flush done or flush failed events
+ */
+#define SFC_TX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for TX queue
+ * flush done or flush failed events; it defines TX queue flush attempt timeout
+ * together with SFC_TX_QFLUSH_POLL_WAIT_MS
+ */
+#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
+
+static int
+sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
+ const struct rte_eth_txconf *tx_conf)
+{
+ unsigned int flags = tx_conf->txq_flags;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc = 0;
+
+ if (tx_conf->tx_rs_thresh != 0) {
+ sfc_err(sa, "RS bit in transmit descriptor is not supported");
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
+ sfc_err(sa,
+ "TxQ free threshold too large: %u vs maximum %u",
+ tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_thresh.pthresh != 0 ||
+ tx_conf->tx_thresh.hthresh != 0 ||
+ tx_conf->tx_thresh.wthresh != 0) {
+ sfc_err(sa,
+ "prefetch/host/writeback thresholds are not supported");
+ rc = EINVAL;
+ }
+
+ if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
+ sfc_err(sa, "Multi-segment is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
+ if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
+ if (!encp->enc_hw_tx_insert_vlan_enabled) {
+ sfc_err(sa, "VLAN offload is not supported");
+ rc = EINVAL;
+ } else if (~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) {
+ sfc_err(sa,
+ "VLAN offload is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+ }
+
+ if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
+ sfc_err(sa, "SCTP offload is not supported");
+ rc = EINVAL;
+ }
+
+ /* We either perform both TCP and UDP offload, or no offload at all */
+ if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
+ ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
+ sfc_err(sa, "TCP and UDP offloads can't be set independently");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+void
+sfc_tx_qflush_done(struct sfc_txq *txq)
+{
+ txq->state |= SFC_TXQ_FLUSHED;
+ txq->state &= ~SFC_TXQ_FLUSHING;
+}
+
+int
+sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_txq_info *txq_info;
+ struct sfc_evq *evq;
+ struct sfc_txq *txq;
+ int rc = 0;
+ struct sfc_dp_tx_qcreate_info info;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
+ txq_info->entries = nb_tx_desc;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
+ txq_info->entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ txq_info->txq = txq;
+
+ txq->hw_index = sw_index;
+ txq->evq = evq;
+ txq->free_thresh =
+ (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
+ SFC_TX_DEFAULT_FREE_THRESH;
+ txq->flags = tx_conf->txq_flags;
+
+ rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
+ socket_id, &txq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
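+ /*
+ * Fill in the queue creation details passed to the
+ * datapath-specific qcreate callback
+ */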
+ memset(&info, 0, sizeof(info));
+ info.free_thresh = txq->free_thresh;
+ info.flags = tx_conf->txq_flags;
+ info.txq_entries = txq_info->entries;
+ info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
+ info.txq_hw_ring = txq->mem.esm_base;
+ info.evq_entries = txq_info->entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = txq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+
+ rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &txq->dp);
+ if (rc != 0)
+ goto fail_dp_tx_qinit;
+
+ evq->dp_txq = txq->dp;
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_tx_qinit:
+ sfc_dma_free(sa, &txq->mem);
+
+fail_dma_alloc:
+ txq_info->txq = NULL;
+ rte_free(txq);
+
+fail_txq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ txq_info->entries = 0;
+
+fail_bad_conf:
+ sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
+ return rc;
+}
+
+void
+sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ sa->dp_tx->qdestroy(txq->dp);
+ txq->dp = NULL;
+
+ txq_info->txq = NULL;
+ txq_info->entries = 0;
+
+ sfc_dma_free(sa, &txq->mem);
+
+ sfc_ev_qfini(txq->evq);
+ txq->evq = NULL;
+
+ rte_free(txq);
+}
+
+static int
+sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ return 0;
+}
+
+static int
+sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
+{
+ int rc = 0;
+
+ switch (txmode->mq_mode) {
+ case ETH_MQ_TX_NONE:
+ break;
+ default:
+ sfc_err(sa, "Tx multi-queue mode %u not supported",
+ txmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ /*
+ * These features are claimed to be i40e-specific,
+ * but it makes sense to double-check that they are not requested
+ */
+ if (txmode->hw_vlan_reject_tagged) {
+ sfc_err(sa, "Rejecting tagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_reject_untagged) {
+ sfc_err(sa, "Rejecting untagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_insert_pvid) {
+ sfc_err(sa, "Port-based VLAN insertion not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_tx_queues <= sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (--sw_index >= (int)nb_tx_queues) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qfini(sa, sw_index);
+ }
+
+ sa->txq_count = nb_tx_queues;
+}
+
+int
+sfc_tx_configure(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
+ int rc = 0;
+
+ sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
+ nb_tx_queues, sa->txq_count);
+
+ /*
+ * The datapath implementation assumes that Tx DMA descriptors
+ * are not subject to boundary limits. Adding such checks to the
+ * datapath would simply make it slower.
+ */
+ if (encp->enc_tx_dma_desc_boundary != 0) {
+ rc = ENOTSUP;
+ goto fail_tx_dma_desc_boundary;
+ }
+
+ rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_tx_queues == sa->txq_count)
+ goto done;
+
+ if (sa->txq_info == NULL) {
+ sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+ sizeof(sa->txq_info[0]), 0,
+ sa->socket_id);
+ if (sa->txq_info == NULL)
+ goto fail_txqs_alloc;
+ } else {
+ struct sfc_txq_info *new_txq_info;
+
+ if (nb_tx_queues < sa->txq_count)
+ sfc_tx_fini_queues(sa, nb_tx_queues);
+
+ new_txq_info =
+ rte_realloc(sa->txq_info,
+ nb_tx_queues * sizeof(sa->txq_info[0]), 0);
+ if (new_txq_info == NULL && nb_tx_queues > 0)
+ goto fail_txqs_realloc;
+
+ sa->txq_info = new_txq_info;
+ if (nb_tx_queues > sa->txq_count)
+ memset(&sa->txq_info[sa->txq_count], 0,
+ (nb_tx_queues - sa->txq_count) *
+ sizeof(sa->txq_info[0]));
+ }
+
+ while (sa->txq_count < nb_tx_queues) {
+ rc = sfc_tx_qinit_info(sa, sa->txq_count);
+ if (rc != 0)
+ goto fail_tx_qinit_info;
+
+ sa->txq_count++;
+ }
+
+done:
+ return 0;
+
+fail_tx_qinit_info:
+fail_txqs_realloc:
+fail_txqs_alloc:
+ sfc_tx_close(sa);
+
+fail_check_mode:
+fail_tx_dma_desc_boundary:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_close(struct sfc_adapter *sa)
+{
+ sfc_tx_fini_queues(sa, 0);
+
+ rte_free(sa->txq_info);
+ sa->txq_info = NULL;
+}
+
+int
+sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ struct sfc_evq *evq;
+ uint16_t flags;
+ unsigned int desc_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ evq = txq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ /*
+ * It seems that DPDK has no controls regarding IPv4 checksum offload;
+ * hence, we always enable it here
+ */
+ if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
+ (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
+ flags = EFX_TXQ_CKSUM_IPV4;
+ } else {
+ flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
+
+ if (sa->tso)
+ flags |= EFX_TXQ_FATSOV2;
+ }
+
+ rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
+ txq_info->entries, 0 /* not used on EF10 */,
+ flags, evq->common,
+ &txq->common, &desc_index);
+ if (rc != 0) {
+ if (sa->tso && (rc == ENOSPC))
+ sfc_err(sa, "ran out of TSO contexts");
+
+ goto fail_tx_qcreate;
+ }
+
+ efx_tx_qenable(txq->common);
+
+ txq->state |= SFC_TXQ_STARTED;
+
+ rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_dp_qstart:
+ txq->state = SFC_TXQ_INITIALIZED;
+ efx_tx_qdestroy(txq->common);
+
+fail_tx_qcreate:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ if (txq->state == SFC_TXQ_INITIALIZED)
+ return;
+
+ SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
+
+ sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
+
+ /*
+ * Retry TX queue flushing in case of flush failure or
+ * timeout; in the worst case it can delay for 6 seconds
+ */
+ for (retry_count = 0;
+ ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ if (efx_tx_qflush(txq->common) != 0) {
+ txq->state |= SFC_TXQ_FLUSHING;
+ break;
+ }
+
+ /*
+ * Wait for TX queue flush done or flush failed event at least
+ * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+ * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
+ * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(txq->evq);
+ } while ((txq->state & SFC_TXQ_FLUSHING) &&
+ wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
+
+ if (txq->state & SFC_TXQ_FLUSHING)
+ sfc_err(sa, "TxQ %u flush timed out", sw_index);
+
+ if (txq->state & SFC_TXQ_FLUSHED)
+ sfc_info(sa, "TxQ %u flushed", sw_index);
+ }
+
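+ /* Reap the queue regardless of whether the flush has succeeded */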
+ sa->dp_tx->qreap(txq->dp);
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ efx_tx_qdestroy(txq->common);
+
+ sfc_ev_qstop(txq->evq);
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+int
+sfc_tx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ if (sa->tso) {
+ if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+ sfc_warn(sa, "TSO support was unable to be restored");
+ sa->tso = B_FALSE;
+ }
+ }
+
+ rc = efx_tx_init(sa->nic);
+ if (rc != 0)
+ goto fail_efx_tx_init;
+
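+ /*
+ * Start a queue unless its start is deferred and it has not
+ * been started before
+ */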
+ for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
+ if (!(sa->txq_info[sw_index].deferred_start) ||
+ sa->txq_info[sw_index].deferred_started) {
+ rc = sfc_tx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_tx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_tx_qstart:
+ while (sw_index-- > 0)
+ sfc_tx_qstop(sa, sw_index);
+
+ efx_tx_fini(sa->nic);
+
+fail_efx_tx_init:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (sw_index-- > 0) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qstop(sa, sw_index);
+ }
+
+ efx_tx_fini(sa->nic);
+}
+
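+/*
+ * Poll the event queue and free the mbufs of Tx descriptors
+ * completed since the last reap
+ */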
+static void
+sfc_efx_tx_reap(struct sfc_efx_txq *txq)
+{
+ unsigned int completed;
+
+ sfc_ev_qpoll(txq->evq);
+
+ for (completed = txq->completed;
+ completed != txq->pending; completed++) {
+ struct sfc_efx_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & txq->ptr_mask];
+
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ }
+
+ txq->completed = completed;
+}
+
+/*
+ * The function is used to insert or update a VLAN tag;
+ * the firmware keeps per-TxQ state of the VLAN tag to insert
+ * (controlled by option descriptors), hence, if the tag of the
+ * packet to be sent differs from the one remembered by the firmware,
+ * the function updates it
+ */
+static unsigned int
+sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
+ efx_desc_t **pend)
+{
+ uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ m->vlan_tci : 0);
+
+ if (this_tag == txq->hw_vlan_tci)
+ return 0;
+
+ /*
+ * The expression inside SFC_ASSERT() is deliberately not checked in
+ * non-debug builds because it might be too expensive on the data path
+ */
+ SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);
+
+ efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
+ *pend);
+ (*pend)++;
+ txq->hw_vlan_tci = this_tag;
+
+ return 1;
+}
+
+static uint16_t
+sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int added = txq->added;
+ unsigned int pushed = added;
+ unsigned int pkts_sent = 0;
+ efx_desc_t *pend = &txq->pend_desc[0];
+ const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
+ const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
+ unsigned int fill_level = added - txq->completed;
+ boolean_t reap_done;
+ int rc __rte_unused;
+ struct rte_mbuf **pktp;
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
+ goto done;
+
+ /*
+ * If there is insufficient space left for even a single packet,
+ * we should reap; otherwise, we should not reap on every call,
+ * to avoid increasing latency
+ */
+ reap_done = (fill_level > soft_max_fill);
+
+ if (reap_done) {
+ sfc_efx_tx_reap(txq);
+ /*
+ * Recalculate fill level since 'txq->completed'
+ * might have changed on reap
+ */
+ fill_level = added - txq->completed;
+ }
+
+ for (pkts_sent = 0, pktp = &tx_pkts[0];
+ (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
+ pkts_sent++, pktp++) {
+ struct rte_mbuf *m_seg = *pktp;
+ size_t pkt_len = m_seg->pkt_len;
+ unsigned int pkt_descs = 0;
+ size_t in_off = 0;
+
+ /*
+ * Here the VLAN TCI is expected to be zero if no
+ * DEV_TX_VLAN_OFFLOAD capability is advertised;
+ * if the calling app ignores the absence of
+ * DEV_TX_VLAN_OFFLOAD and pushes a VLAN TCI, then
+ * a TX_ERROR will occur
+ */
+ pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
+
+ if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ /*
+ * We expect the 'pkt->l[2, 3, 4]_len' values
+ * to be set correctly by the caller
+ */
+ if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
+ &pkt_descs, &pkt_len) != 0) {
+ /* We may have reached this place for
+ * one of the following reasons:
+ *
+ * 1) Packet header length is greater
+ * than SFC_TSOH_STD_LEN
+ * 2) TCP header starts more than
+ * 208 bytes into the frame
+ *
+ * We will deceive RTE by saying that we have
+ * sent the packet, but we will actually drop it.
+ * Hence, we should revert 'pend' to the
+ * previous state (in case we have added
+ * a VLAN descriptor) and start processing
+ * the next packet. But the original
+ * mbuf shouldn't be orphaned
+ */
+ pend -= pkt_descs;
+
+ rte_pktmbuf_free(*pktp);
+
+ continue;
+ }
+
+ /*
+ * We've only added 2 FATSOv2 option descriptors
+ * and 1 descriptor for the linearized packet header.
+ * The remaining work will be done in the same manner
+ * as for the usual non-TSO path
+ */
+ }
+
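+ /* Build DMA descriptors for all remaining segments of the packet */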
+ for (; m_seg != NULL; m_seg = m_seg->next) {
+ efsys_dma_addr_t next_frag;
+ size_t seg_len;
+
+ seg_len = m_seg->data_len;
+ next_frag = rte_mbuf_data_dma_addr(m_seg);
+
+ /*
+ * If we've started a TSO transaction a few steps earlier,
+ * we'll skip the packet header using an offset in the
+ * current segment (which has been set to the
+ * first one containing payload)
+ */
+ seg_len -= in_off;
+ next_frag += in_off;
+ in_off = 0;
+
+ do {
+ efsys_dma_addr_t frag_addr = next_frag;
+ size_t frag_len;
+
+ /*
+ * It is assumed here that there is no
+ * limitation on address boundary
+ * crossing by DMA descriptor.
+ */
+ frag_len = MIN(seg_len, txq->dma_desc_size_max);
+ next_frag += frag_len;
+ seg_len -= frag_len;
+ pkt_len -= frag_len;
+
+ efx_tx_qdesc_dma_create(txq->common,
+ frag_addr, frag_len,
+ (pkt_len == 0),
+ pend++);
+
+ pkt_descs++;
+ } while (seg_len != 0);
+ }
+
+ added += pkt_descs;
+
+ fill_level += pkt_descs;
+ if (unlikely(fill_level > hard_max_fill)) {
+ /*
+ * Our estimate of the maximum number of descriptors
+ * required to send a packet seems to be wrong.
+ * Try to reap (if we haven't yet).
+ */
+ if (!reap_done) {
+ sfc_efx_tx_reap(txq);
+ reap_done = B_TRUE;
+ fill_level = added - txq->completed;
+ if (fill_level > hard_max_fill) {
+ pend -= pkt_descs;
+ break;
+ }
+ } else {
+ pend -= pkt_descs;
+ break;
+ }
+ }
+
+ /* Assign mbuf to the last used desc */
+ txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
+ }
+
+ if (likely(pkts_sent > 0)) {
+ rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
+ pend - &txq->pend_desc[0],
+ txq->completed, &txq->added);
+ SFC_ASSERT(rc == 0);
+
+ if (likely(pushed != txq->added))
+ efx_tx_qpush(txq->common, txq->added, pushed);
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_efx_tx_reap(txq);
+#endif
+
+done:
+ return pkts_sent;
+}
+
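+/*
+ * Find the control path Tx queue that corresponds to the given
+ * datapath Tx queue
+ */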
+struct sfc_txq *
+sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+ const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_txq *txq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->txq_count);
+ txq = sa->txq_info[dpq->queue_id].txq;
+
+ SFC_ASSERT(txq != NULL);
+ return txq;
+}
+
+static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
+static int
+sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_efx_txq *txq;
+ struct sfc_txq *ctrl_txq;
+ int rc;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
+ EFX_TXQ_LIMIT(info->txq_entries),
+ sizeof(*txq->pend_desc), 0,
+ socket_id);
+ if (txq->pend_desc == NULL)
+ goto fail_pend_desc_alloc;
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
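+ /* TSO header buffers are needed only if TSO is enabled on the adapter */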
+ ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
+ if (ctrl_txq->evq->sa->tso) {
+ rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
+ info->txq_entries, socket_id);
+ if (rc != 0)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ txq->evq = ctrl_txq->evq;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->free_thresh = info->free_thresh;
+ txq->dma_desc_size_max = info->dma_desc_size_max;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_alloc_tsoh_objs:
+ rte_free(txq->sw_ring);
+
+fail_sw_ring_alloc:
+ rte_free(txq->pend_desc);
+
+fail_pend_desc_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
+static void
+sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
+ rte_free(txq->sw_ring);
+ rte_free(txq->pend_desc);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
+static int
+sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
+
+ txq->common = ctrl_txq->common;
+
+ txq->pending = txq->completed = txq->added = txq_desc_index;
+ txq->hw_vlan_tci = 0;
+
+ txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
+static void
+sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
+}
+
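+/* Reap completed descriptors and free any mbufs still owned by the queue */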
+static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
+static void
+sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ sfc_efx_tx_reap(txq);
+
+ for (txds = 0; txds <= txq->ptr_mask; txds++) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_tx sfc_efx_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_TX_FEAT_VLAN_INSERT |
+ SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_SEG,
+ .qcreate = sfc_efx_tx_qcreate,
+ .qdestroy = sfc_efx_tx_qdestroy,
+ .qstart = sfc_efx_tx_qstart,
+ .qstop = sfc_efx_tx_qstop,
+ .qreap = sfc_efx_tx_qreap,
+ .pkt_burst = sfc_efx_xmit_pkts,
+};
diff --git a/src/seastar/dpdk/drivers/net/sfc/sfc_tx.h b/src/seastar/dpdk/drivers/net/sfc/sfc_tx.h
new file mode 100644
index 00000000..6c3ac3b6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/sfc/sfc_tx.h
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_TX_H
+#define _SFC_TX_H
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc_dp_tx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Tx descriptor information associated with hardware Tx
+ * descriptor.
+ */
+struct sfc_efx_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+ uint8_t *tsoh; /* Buffer to store TSO header */
+};
+
+enum sfc_txq_state_bit {
+ SFC_TXQ_INITIALIZED_BIT = 0,
+#define SFC_TXQ_INITIALIZED (1 << SFC_TXQ_INITIALIZED_BIT)
+ SFC_TXQ_STARTED_BIT,
+#define SFC_TXQ_STARTED (1 << SFC_TXQ_STARTED_BIT)
+ SFC_TXQ_FLUSHING_BIT,
+#define SFC_TXQ_FLUSHING (1 << SFC_TXQ_FLUSHING_BIT)
+ SFC_TXQ_FLUSHED_BIT,
+#define SFC_TXQ_FLUSHED (1 << SFC_TXQ_FLUSHED_BIT)
+};
+
+/**
+ * Transmit queue control information. Not used on the data path.
+ * Allocated on the socket specified at queue setup.
+ */
+struct sfc_txq {
+ unsigned int state;
+ unsigned int hw_index;
+ struct sfc_evq *evq;
+ efsys_mem_t mem;
+ struct sfc_dp_txq *dp;
+ efx_txq_t *common;
+ unsigned int free_thresh;
+ unsigned int flags;
+};
+
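+/* The SW Tx queue index maps one-to-one to the HW Tx queue index */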
+static inline unsigned int
+sfc_txq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_txq_sw_index(const struct sfc_txq *txq)
+{
+ return sfc_txq_sw_index_by_hw_index(txq->hw_index);
+}
+
+struct sfc_txq *sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue information used on libefx-based data path.
+ * Allocated on the socket specified at queue setup.
+ */
+struct sfc_efx_txq {
+ struct sfc_evq *evq;
+ struct sfc_efx_tx_sw_desc *sw_ring;
+ unsigned int ptr_mask;
+ efx_desc_t *pend_desc;
+ efx_txq_t *common;
+ unsigned int added;
+ unsigned int pending;
+ unsigned int completed;
+ unsigned int free_thresh;
+ uint16_t hw_vlan_tci;
+ uint16_t dma_desc_size_max;
+
+ unsigned int hw_index;
+ unsigned int flags;
+#define SFC_EFX_TXQ_FLAG_STARTED 0x1
+#define SFC_EFX_TXQ_FLAG_RUNNING 0x2
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_efx_txq *
+sfc_efx_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_efx_txq, dp);
+}
+
+struct sfc_txq_info {
+ unsigned int entries;
+ struct sfc_txq *txq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_tx_configure(struct sfc_adapter *sa);
+void sfc_tx_close(struct sfc_adapter *sa);
+
+int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+
+void sfc_tx_qflush_done(struct sfc_txq *txq);
+int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_tx_start(struct sfc_adapter *sa);
+void sfc_tx_stop(struct sfc_adapter *sa);
+
+/* From 'sfc_tso.c' */
+int sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries,
+ unsigned int socket_id);
+void sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries);
+int sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_TX_H */